hip_filename (stringlengths 5–84) | hip_content (stringlengths 79–9.69M) | cuda_filename (stringlengths 4–83) | cuda_content (stringlengths 19–9.69M) |
---|---|---|---|
4da4e7a2668b357f440f56f8318e39a347cf6cd2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "kernel.h"
#include "hip/hip_runtime.h"
#include<math.h>
double *d_answer;
double *h_answer;
double *hb_answer;
void Allocate_Memory()
{
hipError_t Error;
size_t size = 5 * sizeof(double);
h_answer = (double*)malloc(size);
hb_answer = (double*)malloc(size);
Error = hipMalloc((void**)&d_answer, size);
printf("cuda malloc error message is %s", hipGetErrorString(Error));
}
void Free_Memory()
{
if (h_answer) free(h_answer);
if (hb_answer) free(hb_answer);
if (d_answer) hipFree(d_answer);
}
void Send_To_Device()
{
size_t size = 5 * sizeof(double);
hipError_t Error;
Error = hipMemcpy(d_answer, h_answer, size, hipMemcpyHostToDevice);
printf("\nCUDA error(memcpy h_answer->d_answer)=%s\n", hipGetErrorString(Error));
}
void Get_From_Memory()
{
size_t size = 5 * sizeof(double);
hipError_t Error;
Error = hipMemcpy(hb_answer, d_answer, size, hipMemcpyDeviceToHost);
printf("\nCUDA error(memcpy d_answer->h_answer)=%s\n", hipGetErrorString(Error));
}
__global__ void hardy_cross(double * _a)
{
double Q12 = 10;
double Q13 = 0;
double Q23 = 0;
double Q24 = 10;
double Q34 = 0;
double r12 = 5;
double r13 = 1;
double r23 = 1;
double r24 = 1;
double r34 = 5;
double dQ_1;
double dQ_2;
int c12 = 1;
int c13 = 1;
int c23 = 1;
int c24 = 1;
int c34 = 1;
int no_iteration = 100;
for (int i = 0; i < no_iteration; i++)
{
c12 = Q12 > 0 ? 1 : -1;
c13 = Q13 > 0 ? 1 : -1;
c23 = Q23 > 0 ? 1 : -1;
c24 = Q24 > 0 ? 1 : -1;
c34 = Q34 > 0 ? 1 : -1;
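// Hardy Cross corrections, one per loop: dQ = -sum(sign(Q)*r*Q^2) / sum(2*r*|Q|);
// pipes traversed against the loop direction (13 in loop 1; 23 and 34 in loop 2)
// enter the numerator with a minus sign.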
dQ_1 = -(c12*r12 *Q12 *Q12 + c23*r23*Q23*Q23 - c13*r13*Q13*Q13) / (2 * r12*fabs(Q12) + 2 * r23*fabs(Q23) + 2 * r13*fabs(Q13));
dQ_2 = -(c24*r24 *Q24 *Q24 - c23*r23*Q23*Q23 - c34*r34*Q34*Q34) / (2 * r24*fabs(Q24) + 2 * r23*fabs(Q23) + 2 * r34*fabs(Q34));
Q12 = Q12 + dQ_1;
Q23 = Q23 + dQ_1 - dQ_2;
Q13 = Q13 - dQ_1;
Q24 = Q24 + dQ_2;
Q34 = Q34 - dQ_2;
}
_a[0] = Q12;
_a[1] = Q13;
_a[2] = Q23;
_a[3] = Q24;
_a[4] = Q34;
}
void Launch_hardy_cross()
{
hardy_cross<<<1, 1>>>(d_answer);
}
|
4da4e7a2668b357f440f56f8318e39a347cf6cd2.cu
|
#include "kernel.h"
#include "cuda_runtime.h"
#include<math.h>
double *d_answer;
double *h_answer;
double *hb_answer;
void Allocate_Memory()
{
cudaError_t Error;
size_t size = 5 * sizeof(double);
h_answer = (double*)malloc(size);
hb_answer = (double*)malloc(size);
Error = cudaMalloc((void**)&d_answer, size);
printf("cuda malloc error message is %s", cudaGetErrorString(Error));
}
void Free_Memory()
{
if (h_answer) free(h_answer);
if (hb_answer) free(hb_answer);
if (d_answer) cudaFree(d_answer);
}
void Send_To_Device()
{
size_t size = 5 * sizeof(double);
cudaError_t Error;
Error = cudaMemcpy(d_answer, h_answer, size, cudaMemcpyHostToDevice);
printf("\nCUDA error(memcpy h_answer->d_answer)=%s\n", cudaGetErrorString(Error));
}
void Get_From_Memory()
{
size_t size = 5 * sizeof(double);
cudaError_t Error;
Error = cudaMemcpy(hb_answer, d_answer, size, cudaMemcpyDeviceToHost);
printf("\nCUDA error(memcpy d_answer->h_answer)=%s\n", cudaGetErrorString(Error));
}
__global__ void hardy_cross(double * _a)
{
double Q12 = 10;
double Q13 = 0;
double Q23 = 0;
double Q24 = 10;
double Q34 = 0;
double r12 = 5;
double r13 = 1;
double r23 = 1;
double r24 = 1;
double r34 = 5;
double dQ_1;
double dQ_2;
int c12 = 1;
int c13 = 1;
int c23 = 1;
int c24 = 1;
int c34 = 1;
int no_iteration = 100;
for (int i = 0; i < no_iteration; i++)
{
c12 = Q12 > 0 ? 1 : -1;
c13 = Q13 > 0 ? 1 : -1;
c23 = Q23 > 0 ? 1 : -1;
c24 = Q24 > 0 ? 1 : -1;
c34 = Q34 > 0 ? 1 : -1;
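// Hardy Cross corrections, one per loop: dQ = -sum(sign(Q)*r*Q^2) / sum(2*r*|Q|);
// pipes traversed against the loop direction (13 in loop 1; 23 and 34 in loop 2)
// enter the numerator with a minus sign.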
dQ_1 = -(c12*r12 *Q12 *Q12 + c23*r23*Q23*Q23 - c13*r13*Q13*Q13) / (2 * r12*fabs(Q12) + 2 * r23*fabs(Q23) + 2 * r13*fabs(Q13));
dQ_2 = -(c24*r24 *Q24 *Q24 - c23*r23*Q23*Q23 - c34*r34*Q34*Q34) / (2 * r24*fabs(Q24) + 2 * r23*fabs(Q23) + 2 * r34*fabs(Q34));
Q12 = Q12 + dQ_1;
Q23 = Q23 + dQ_1 - dQ_2;
Q13 = Q13 - dQ_1;
Q24 = Q24 + dQ_2;
Q34 = Q34 - dQ_2;
}
_a[0] = Q12;
_a[1] = Q13;
_a[2] = Q23;
_a[3] = Q24;
_a[4] = Q34;
}
void Launch_hardy_cross()
{
hardy_cross<<<1, 1>>>(d_answer);
}
|
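For orientation, a minimal host driver for the pair above could look like the sketch below. It is an illustration under stated assumptions, not part of the dataset row: kernel.h is presumed to declare the five functions and extern double *hb_answer;, which the sources suggest but do not show.

// hypothetical main driver for the hardy_cross example above
#include <stdio.h>
#include "kernel.h"

int main(void)
{
    Allocate_Memory();      // host and device buffers for the 5 results
    Send_To_Device();       // h_answer -> d_answer (contents are overwritten by the kernel)
    Launch_hardy_cross();   // single-thread kernel runs the Hardy Cross iteration
    Get_From_Memory();      // d_answer -> hb_answer
    for (int i = 0; i < 5; i++) printf("Q[%d] = %f\n", i, hb_answer[i]);
    Free_Memory();
    return 0;
}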
226e19ac4065044ac962b49d05ff5220ea3be4a3.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
Collatz code for CS 4380 / CS 5351
Copyright (c) 2019 Texas State University. All rights reserved.
Redistribution in source or binary form, with or without modification,
is *not* permitted. Use in source and binary forms, with or without
modification, is only permitted for academic use in CS 4380 or CS 5351
at Texas State University.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Author: Martin Burtscher
*/
#include <cstdio>
#include <hip/hip_runtime.h>
static const int ThreadsPerBlock = 512;
static int* d_maxlen;
static __global__ void collatz(const long start, const long stop, int* const maxlen)
{
// process odd values from start (assume start to be odd) to stop (inclusively if stop is odd) with one thread per value (based on code from previous project)
const long i = 2 * (threadIdx.x + blockIdx.x * (long)blockDim.x) + start;
if(i <= stop)
{
long val = i;
int len = 1;
while (val != 1) {
len++;
if ((val % 2) == 0) {
val = val / 2; // even
} else {
val = 3 * val + 1; // odd
}
}
if(len > *maxlen)
{
atomicMax(maxlen, len);
}
}
}
void GPU_Init()
{
int maxlen = 0;
if (hipSuccess != hipMalloc((void **)&d_maxlen, sizeof(int))) {fprintf(stderr, "ERROR: could not allocate memory\n"); exit(-1);}
if (hipSuccess != hipMemcpy(d_maxlen, &maxlen, sizeof(int), hipMemcpyHostToDevice)) {fprintf(stderr, "ERROR: copying to device failed\n"); exit(-1);}
}
void GPU_Exec(const long start, const long stop)
{
if (start <= stop) {
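// one thread per odd value in [start, stop]: (stop - start + 2) / 2 values, rounded up to whole blocks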
hipLaunchKernelGGL(( collatz), dim3(((stop - start + 2) / 2 + ThreadsPerBlock - 1) / ThreadsPerBlock), dim3(ThreadsPerBlock), 0, 0, start, stop, d_maxlen);
}
}
int GPU_Fini()
{
int maxlen = 0;
// copy the result from the device to the host and free the device memory
if (hipSuccess != hipMemcpy(&maxlen, d_maxlen, sizeof(int), hipMemcpyDeviceToHost)) {fprintf(stderr, "ERROR: copying from device failed\n"); exit(-1);}
hipFree(d_maxlen);
return maxlen;
}
|
226e19ac4065044ac962b49d05ff5220ea3be4a3.cu
|
/*
Collatz code for CS 4380 / CS 5351
Copyright (c) 2019 Texas State University. All rights reserved.
Redistribution in source or binary form, with or without modification,
is *not* permitted. Use in source and binary forms, with or without
modification, is only permitted for academic use in CS 4380 or CS 5351
at Texas State University.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Author: Martin Burtscher
*/
#include <cstdio>
#include <cuda.h>
static const int ThreadsPerBlock = 512;
static int* d_maxlen;
static __global__ void collatz(const long start, const long stop, int* const maxlen)
{
// process odd values from start (assume start to be odd) to stop (inclusively if stop is odd) with one thread per value (based on code from previous project)
const long i = 2 * (threadIdx.x + blockIdx.x * (long)blockDim.x) + start;
if(i <= stop)
{
long val = i;
int len = 1;
while (val != 1) {
len++;
if ((val % 2) == 0) {
val = val / 2; // even
} else {
val = 3 * val + 1; // odd
}
}
if(len > *maxlen)
{
atomicMax(maxlen, len);
}
}
}
void GPU_Init()
{
int maxlen = 0;
if (cudaSuccess != cudaMalloc((void **)&d_maxlen, sizeof(int))) {fprintf(stderr, "ERROR: could not allocate memory\n"); exit(-1);}
if (cudaSuccess != cudaMemcpy(d_maxlen, &maxlen, sizeof(int), cudaMemcpyHostToDevice)) {fprintf(stderr, "ERROR: copying to device failed\n"); exit(-1);}
}
void GPU_Exec(const long start, const long stop)
{
if (start <= stop) {
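// one thread per odd value in [start, stop]: (stop - start + 2) / 2 values, rounded up to whole blocks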
collatz<<<((stop - start + 2) / 2 + ThreadsPerBlock - 1) / ThreadsPerBlock, ThreadsPerBlock>>>(start, stop, d_maxlen);
}
}
int GPU_Fini()
{
int maxlen = 0;
// copy the result from the device to the host and free the device memory
if (cudaSuccess != cudaMemcpy(&maxlen, d_maxlen, sizeof(int), cudaMemcpyDeviceToHost)) {fprintf(stderr, "ERROR: copying from device failed\n"); exit(-1);}
cudaFree(d_maxlen);
return maxlen;
}
|
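A matching host sequence for the three functions above would be roughly the following sketch (hypothetical driver, not from the sources; the range is illustrative, and start must be odd, as the kernel comment requires):

#include <cstdio>
// GPU_Init / GPU_Exec / GPU_Fini as defined in the files above
int main()
{
    GPU_Init();                     // zero-initialize d_maxlen on the device
    GPU_Exec(1L, 9999999L);         // asynchronous launch over the odd values in range
    const int maxlen = GPU_Fini();  // the blocking copy retrieves the result
    printf("longest Collatz sequence length: %d\n", maxlen);
    return 0;
}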
10752019161baf18cdf3246864a35c095244f55b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = 0;
for (int j = 0; j < 200000; j++) {
c[i] += a[i] + b[i];
}
}
int main()
{
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
hipError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output).
cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
hipLaunchKernelGGL(( addKernel), dim3(1), dim3(size), 0, 0, dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
return cudaStatus;
}
|
10752019161baf18cdf3246864a35c095244f55b.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = 0;
for (int j = 0; j < 200000; j++) {
c[i] += a[i] + b[i];
}
}
int main()
{
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output).
cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
addKernel<<<1, size>>>(dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
return cudaStatus;
}
|
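The repeated if (cudaStatus != cudaSuccess) { ... goto Error; } blocks in the pair above are the verbose form of a pattern usually folded into a checking macro. A minimal sketch follows (the macro name and placement are hypothetical, not part of the original sources):

// Hypothetical helper: wraps a CUDA runtime call and jumps to the cleanup
// label on failure, mirroring the goto-based flow of addWithCuda above.
// It relies on a cudaError_t cudaStatus and an Error: label in the enclosing scope.
#define CUDA_CHECK(call)                                        \
    do {                                                        \
        cudaStatus = (call);                                    \
        if (cudaStatus != cudaSuccess) {                        \
            fprintf(stderr, "%s failed: %s\n", #call,           \
                    cudaGetErrorString(cudaStatus));            \
            goto Error;                                         \
        }                                                       \
    } while (0)

// usage inside addWithCuda:
//   CUDA_CHECK(cudaMalloc((void**)&dev_c, size * sizeof(int)));
//   CUDA_CHECK(cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice));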
576efda4901cbd467af9847b4d514c9096a0c564.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2017 Max Planck Society
// Distributed under the BSD-3 Software license,
// (See accompanying file LICENSE.txt or copy at
// https://opensource.org/licenses/BSD-3-Clause)
#include <cfloat>
#include <vector>
#include "caffe/layers/warp_layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/util/gpu_util.cuh"
namespace caffe {
template <typename Dtype>
__global__ void truncate_interp2_fwd(const int nthreads, const Dtype *bottom_0_data_, const Dtype *bottom_1_data_,
const int num_, const int channels_, const int height_, const int width_,
Dtype *theta_data, Dtype* theta_data_, Dtype *x_w_data, Dtype *top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int temp = 0;
const int n = index / (channels_ * height_ * width_);
temp = index % (channels_ * height_ * width_);
const int c = temp / (height_ * width_);
temp = temp % (height_ * width_);
const int h = temp / width_;
const int w = temp % width_;
int index_x = ((n * 2 + 1) * height_ + h) * width_ + w;
int index_y = ((n * 2 + 0) * height_ + h) * width_ + w;
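// Flow blob layout assumed (n, 2, h, w): channel 0 carries the horizontal (w)
// displacement, channel 1 the vertical (h) displacement; x_w becomes the
// absolute source coordinate to sample from.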
x_w_data[ index_x ] = h + bottom_1_data_[ index_x ];
x_w_data[ index_y ] = w + bottom_1_data_[ index_y ];
int xw_floor = (int)floor(x_w_data[ index_x ]);
int yw_floor = (int)floor(x_w_data[ index_y ]);
int xw_ceil = (int)ceil(x_w_data[ index_x ]);
int yw_ceil = (int)ceil(x_w_data[ index_y ]);
theta_data[ index_x ] = x_w_data[ index_x ] - floor(x_w_data[ index_x ]);
theta_data[ index_y ] = x_w_data[ index_y ] - floor(x_w_data[ index_y ]);
theta_data_[ index_x ] = 1 - theta_data[ index_x ];
theta_data_[ index_y ] = 1 - theta_data[ index_y ];
int offset = (n * channels_ + c) * height_;
if (x_w_data[ index_x ] >= 0 && x_w_data[ index_x ] <= height_-1 &&
x_w_data[ index_y ] >= 0 && x_w_data[ index_y ] <= width_-1) {
Dtype I0 = bottom_0_data_[ (offset + xw_floor) * width_ + yw_floor ];
Dtype I1 = bottom_0_data_[ (offset + xw_ceil ) * width_ + yw_floor ];
Dtype I2 = bottom_0_data_[ (offset + xw_floor) * width_ + yw_ceil ];
Dtype I3 = bottom_0_data_[ (offset + xw_ceil ) * width_ + yw_ceil ];
top_data[ (offset + h) * width_ + w ] = (theta_data_[index_x] * theta_data_[index_y] * I0) +
(theta_data[index_x] * theta_data_[index_y] * I1) +
(theta_data_[index_x] * theta_data[index_y] * I2) +
(theta_data[index_x] * theta_data[index_y] * I3);
}
}
}
template <typename Dtype>
__global__ void nearest_interp2_fwd(const int nthreads, const Dtype *bottom_0_data_, const Dtype *bottom_1_data_,
const int num_, const int channels_, const int height_, const int width_,
Dtype *theta_data, Dtype* theta_data_, Dtype *x_w_data, Dtype *top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int temp = 0;
const int n = index / (channels_ * height_ * width_);
temp = index % (channels_ * height_ * width_);
const int c = temp / (height_ * width_);
temp = temp % (height_ * width_);
const int h = temp / width_;
const int w = temp % width_;
int index_x = ((n * 2 + 1) * height_ + h) * width_ + w;
int index_y = ((n * 2 + 0) * height_ + h) * width_ + w;
x_w_data[ index_x ] = h + bottom_1_data_[ index_x ];
x_w_data[ index_y ] = w + bottom_1_data_[ index_y ];
int xw_floor = (int)floor(x_w_data[ index_x ]);
int yw_floor = (int)floor(x_w_data[ index_y ]);
int xw_ceil = (int)ceil(x_w_data[ index_x ]);
int yw_ceil = (int)ceil(x_w_data[ index_y ]);
theta_data[ index_x ] = x_w_data[ index_x ] - floor(x_w_data[ index_x ]);
theta_data[ index_y ] = x_w_data[ index_y ] - floor(x_w_data[ index_y ]);
if (x_w_data[ index_x ] < 0) {
theta_data[ index_x ] = x_w_data[ index_x ];
xw_floor = 0; xw_ceil = 0;
}
if (x_w_data[ index_x ] >= height_-1) {
theta_data[ index_x ] = x_w_data[ index_x ] - height_;
xw_floor = height_-1; xw_ceil = height_-1;
}
if (x_w_data[ index_y ] < 0) {
theta_data[ index_y ] = x_w_data[ index_y ];
yw_floor = 0; yw_ceil = 0;
}
if (x_w_data[ index_y ] >= width_-1) {
theta_data[ index_y ] = x_w_data[ index_y ] - width_;
yw_floor = width_-1; yw_ceil = width_-1;
}
theta_data_[ index_x ] = 1 - theta_data[ index_x ];
theta_data_[ index_y ] = 1 - theta_data[ index_y ];
int offset = (n * channels_ + c) * height_;
Dtype I0 = bottom_0_data_[ (offset + xw_floor) * width_ + yw_floor ];
Dtype I1 = bottom_0_data_[ (offset + xw_ceil ) * width_ + yw_floor ];
Dtype I2 = bottom_0_data_[ (offset + xw_floor) * width_ + yw_ceil ];
Dtype I3 = bottom_0_data_[ (offset + xw_ceil ) * width_ + yw_ceil ];
top_data[ (offset + h) * width_ + w ] = (theta_data_[index_x] * theta_data_[index_y] * I0) +
(theta_data[index_x] * theta_data_[index_y] * I1) +
(theta_data_[index_x] * theta_data[index_y] * I2) +
(theta_data[index_x] * theta_data[index_y] * I3);
}
}
template <typename Dtype>
__global__ void truncate_interp2_bwd(const int nthreads, const int num_, const int channels_, const int height_,
const int width_, const Dtype *theta_data, const Dtype* theta_data_,
const Dtype *x_w_data, Dtype *bottom_0_diff, Dtype *bottom_1_diff,
const Dtype *top_diff, const Dtype *top_data, const Dtype *bottom_0_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int temp = 0;
const int n = index / (channels_ * height_ * width_);
temp = index % (channels_ * height_ * width_);
const int c = temp / (height_ * width_);
temp = temp % (height_ * width_);
const int h = temp / width_;
const int w = temp % width_;
int index_x = ((n * 2 + 1) * height_ + h) * width_ + w;
int index_y = ((n * 2 + 0) * height_ + h) * width_ + w;
if (!(x_w_data[ index_x ] < 0 || x_w_data[ index_x ] > height_-1 ||
x_w_data[ index_y ] < 0 || x_w_data[ index_y ] > width_-1)) {
int xw_floor = (int)floor(x_w_data[ index_x ]);
int yw_floor = (int)floor(x_w_data[ index_y ]);
int xw_ceil = (int)ceil(x_w_data[ index_x ]);
int yw_ceil = (int)ceil(x_w_data[ index_y ]);
int bottom_0_index = ((n * channels_ + c) * height_ + h) * width_ + w;
int offset = (n * channels_ + c) * height_;
Dtype I0 = bottom_0_data[ (offset + xw_floor) * width_ + yw_floor ];
Dtype I1 = bottom_0_data[ (offset + xw_ceil ) * width_ + yw_floor ];
Dtype I2 = bottom_0_data[ (offset + xw_floor) * width_ + yw_ceil ];
Dtype I3 = bottom_0_data[ (offset + xw_ceil ) * width_ + yw_ceil ];
bottom_1_diff[ index_x ] += ( -1*theta_data_[index_y]*I0 +
theta_data_[index_y]*I1 -
theta_data[index_y] *I2 +
theta_data[index_y] *I3 ) *
top_diff[(offset + h) * width_ + w];
bottom_1_diff[ index_y ] += ( -1*theta_data_[index_x]*I0 -
theta_data[index_x] *I1 +
theta_data_[index_x]*I2 +
theta_data[index_x] *I3 ) *
top_diff[(offset + h) * width_ + w];
caffe_gpu_atomic_add((Dtype) theta_data_[ index_x ]*theta_data_[ index_y ]*top_diff[bottom_0_index],
bottom_0_diff + ((offset + xw_floor) * width_ + yw_floor ));
caffe_gpu_atomic_add((Dtype) theta_data[ index_x ] *theta_data_[ index_y ]*top_diff[bottom_0_index],
bottom_0_diff + ((offset + xw_ceil ) * width_ + yw_floor ));
caffe_gpu_atomic_add((Dtype) theta_data_[ index_x ]*theta_data[ index_y ] *top_diff[bottom_0_index],
bottom_0_diff + ((offset + xw_floor) * width_ + yw_ceil ));
caffe_gpu_atomic_add((Dtype) theta_data[ index_x ] *theta_data[ index_y ] *top_diff[bottom_0_index],
bottom_0_diff + ((offset + xw_ceil ) * width_ + yw_ceil ));
}
}
}
template <typename Dtype>
__global__ void nearest_interp2_bwd(const int nthreads, const int num_, const int channels_, const int height_,
const int width_, const Dtype *theta_data, const Dtype* theta_data_,
const Dtype *x_w_data, Dtype *bottom_0_diff, Dtype *bottom_1_diff,
const Dtype *top_diff, const Dtype *top_data, const Dtype *bottom_0_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int temp = 0;
const int n = index / (channels_ * height_ * width_);
temp = index % (channels_ * height_ * width_);
const int c = temp / (height_ * width_);
temp = temp % (height_ * width_);
const int h = temp / width_;
const int w = temp % width_;
int index_x = ((n * 2 + 1) * height_ + h) * width_ + w;
int index_y = ((n * 2 + 0) * height_ + h) * width_ + w;
int xw_floor = (int)floor(x_w_data[ index_x ]);
int yw_floor = (int)floor(x_w_data[ index_y ]);
int xw_ceil = (int)ceil(x_w_data[ index_x ]);
int yw_ceil = (int)ceil(x_w_data[ index_y ]);
if (x_w_data[ index_x ] < 0) {
xw_floor = 0; xw_ceil = 0;
}
if (x_w_data[ index_x ] >= height_-1) {
xw_floor = height_-1; xw_ceil = height_-1;
}
if (x_w_data[ index_y ] < 0) {
yw_floor = 0; yw_ceil = 0;
}
if (x_w_data[ index_y ] >= width_-1) {
yw_floor = width_-1; yw_ceil = width_-1;
}
int bottom_0_index = ((n * channels_ + c) * height_ + h) * width_ + w;
int offset = (n * channels_ + c) * height_;
Dtype I0 = bottom_0_data[ (offset + xw_floor) * width_ + yw_floor ];
Dtype I1 = bottom_0_data[ (offset + xw_ceil ) * width_ + yw_floor ];
Dtype I2 = bottom_0_data[ (offset + xw_floor) * width_ + yw_ceil ];
Dtype I3 = bottom_0_data[ (offset + xw_ceil ) * width_ + yw_ceil ];
bottom_1_diff[ index_x ] += ( -1*theta_data_[index_y]*I0 +
theta_data_[index_y]*I1 -
theta_data[index_y] *I2 +
theta_data[index_y] *I3 ) *
top_diff[(offset + h) * width_ + w];
bottom_1_diff[ index_y ] += ( -1*theta_data_[index_x]*I0 -
theta_data[index_x] *I1 +
theta_data_[index_x]*I2 +
theta_data[index_x] *I3 ) *
top_diff[(offset + h) * width_ + w];
caffe_gpu_atomic_add((Dtype) theta_data_[ index_x ]*theta_data_[ index_y ]*top_diff[bottom_0_index],
bottom_0_diff + ((offset + xw_floor) * width_ + yw_floor ));
caffe_gpu_atomic_add((Dtype) theta_data[ index_x ] *theta_data_[ index_y ]*top_diff[bottom_0_index],
bottom_0_diff + ((offset + xw_ceil ) * width_ + yw_floor ));
caffe_gpu_atomic_add((Dtype) theta_data_[ index_x ]*theta_data[ index_y ] *top_diff[bottom_0_index],
bottom_0_diff + ((offset + xw_floor) * width_ + yw_ceil ));
caffe_gpu_atomic_add((Dtype) theta_data[ index_x ] *theta_data[ index_y ] *top_diff[bottom_0_index],
bottom_0_diff + ((offset + xw_ceil ) * width_ + yw_ceil ));
}
}
template <typename Dtype>
void WarpLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data_0 = bottom[0]->gpu_data(); // image
const Dtype* bottom_data_1 = bottom[1]->gpu_data(); // optical flow
Dtype* top_data = top[0]->mutable_gpu_data();
Dtype* theta_data = theta.mutable_gpu_data();
Dtype* theta_data_ = theta_.mutable_gpu_data();
Dtype* x_w_data = x_w.mutable_gpu_data();
const int num_kernels = num_ * channels_ * height_ * width_;
caffe_gpu_set(bottom[0]->count(), (Dtype)0., top_data);
switch (outliers_) {
case WarpParameter_WarpType_TRUNCATE:
hipLaunchKernelGGL(( truncate_interp2_fwd<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, bottom_data_0, bottom_data_1, num_, channels_, height_, width_,
theta_data, theta_data_, x_w_data, top_data);
break;
case WarpParameter_WarpType_NEAREST:
hipLaunchKernelGGL(( nearest_interp2_fwd<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, bottom_data_0, bottom_data_1, num_, channels_, height_, width_,
theta_data, theta_data_, x_w_data, top_data);
break;
}
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
void WarpLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0] || propagate_down[1]) {
caffe_gpu_set(bottom[0]->count(), (Dtype)0., bottom[0]->mutable_gpu_diff());
caffe_gpu_set(bottom[1]->count(), (Dtype)0., bottom[1]->mutable_gpu_diff());
const Dtype* theta_data = theta.mutable_gpu_data();
const Dtype* theta_data_ = theta_.mutable_gpu_data();
const Dtype* x_w_data = x_w.mutable_gpu_data();
const Dtype* top_data = top[0]->gpu_data();
const Dtype* bottom_0_data = bottom[0]->gpu_data();
const Dtype* bottom_1_data = bottom[1]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_0_diff = bottom[0]->mutable_gpu_diff();
Dtype* bottom_1_diff = bottom[1]->mutable_gpu_diff();
const int num_kernels = num_ * channels_ * height_ * width_;
switch (outliers_) {
case WarpParameter_WarpType_NEAREST:
hipLaunchKernelGGL(( nearest_interp2_bwd<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, num_, channels_, height_, width_, theta_data, theta_data_, x_w_data,
bottom_0_diff, bottom_1_diff, top_diff, top_data, bottom_0_data);
break;
case WarpParameter_WarpType_TRUNCATE:
hipLaunchKernelGGL(( truncate_interp2_bwd<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, num_, channels_, height_, width_, theta_data, theta_data_, x_w_data,
bottom_0_diff, bottom_1_diff, top_diff, top_data, bottom_0_data);
break;
}
CUDA_POST_KERNEL_CHECK;
//caffe_gpu_mul(top[0]->count(), top_diff, bottom_0_diff, bottom_0_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(WarpLayer);
} // namespace caffe
|
576efda4901cbd467af9847b4d514c9096a0c564.cu
|
// Copyright 2017 Max Planck Society
// Distributed under the BSD-3 Software license,
// (See accompanying file LICENSE.txt or copy at
// https://opensource.org/licenses/BSD-3-Clause)
#include <cfloat>
#include <vector>
#include "caffe/layers/warp_layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/util/gpu_util.cuh"
namespace caffe {
template <typename Dtype>
__global__ void truncate_interp2_fwd(const int nthreads, const Dtype *bottom_0_data_, const Dtype *bottom_1_data_,
const int num_, const int channels_, const int height_, const int width_,
Dtype *theta_data, Dtype* theta_data_, Dtype *x_w_data, Dtype *top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int temp = 0;
const int n = index / (channels_ * height_ * width_);
temp = index % (channels_ * height_ * width_);
const int c = temp / (height_ * width_);
temp = temp % (height_ * width_);
const int h = temp / width_;
const int w = temp % width_;
int index_x = ((n * 2 + 1) * height_ + h) * width_ + w;
int index_y = ((n * 2 + 0) * height_ + h) * width_ + w;
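// Flow blob layout assumed (n, 2, h, w): channel 0 carries the horizontal (w)
// displacement, channel 1 the vertical (h) displacement; x_w becomes the
// absolute source coordinate to sample from.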
x_w_data[ index_x ] = h + bottom_1_data_[ index_x ];
x_w_data[ index_y ] = w + bottom_1_data_[ index_y ];
int xw_floor = (int)floor(x_w_data[ index_x ]);
int yw_floor = (int)floor(x_w_data[ index_y ]);
int xw_ceil = (int)ceil(x_w_data[ index_x ]);
int yw_ceil = (int)ceil(x_w_data[ index_y ]);
theta_data[ index_x ] = x_w_data[ index_x ] - floor(x_w_data[ index_x ]);
theta_data[ index_y ] = x_w_data[ index_y ] - floor(x_w_data[ index_y ]);
theta_data_[ index_x ] = 1 - theta_data[ index_x ];
theta_data_[ index_y ] = 1 - theta_data[ index_y ];
int offset = (n * channels_ + c) * height_;
if (x_w_data[ index_x ] >= 0 && x_w_data[ index_x ] <= height_-1 &&
x_w_data[ index_y ] >= 0 && x_w_data[ index_y ] <= width_-1) {
Dtype I0 = bottom_0_data_[ (offset + xw_floor) * width_ + yw_floor ];
Dtype I1 = bottom_0_data_[ (offset + xw_ceil ) * width_ + yw_floor ];
Dtype I2 = bottom_0_data_[ (offset + xw_floor) * width_ + yw_ceil ];
Dtype I3 = bottom_0_data_[ (offset + xw_ceil ) * width_ + yw_ceil ];
top_data[ (offset + h) * width_ + w ] = (theta_data_[index_x] * theta_data_[index_y] * I0) +
(theta_data[index_x] * theta_data_[index_y] * I1) +
(theta_data_[index_x] * theta_data[index_y] * I2) +
(theta_data[index_x] * theta_data[index_y] * I3);
}
}
}
template <typename Dtype>
__global__ void nearest_interp2_fwd(const int nthreads, const Dtype *bottom_0_data_, const Dtype *bottom_1_data_,
const int num_, const int channels_, const int height_, const int width_,
Dtype *theta_data, Dtype* theta_data_, Dtype *x_w_data, Dtype *top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int temp = 0;
const int n = index / (channels_ * height_ * width_);
temp = index % (channels_ * height_ * width_);
const int c = temp / (height_ * width_);
temp = temp % (height_ * width_);
const int h = temp / width_;
const int w = temp % width_;
int index_x = ((n * 2 + 1) * height_ + h) * width_ + w;
int index_y = ((n * 2 + 0) * height_ + h) * width_ + w;
x_w_data[ index_x ] = h + bottom_1_data_[ index_x ];
x_w_data[ index_y ] = w + bottom_1_data_[ index_y ];
int xw_floor = (int)floor(x_w_data[ index_x ]);
int yw_floor = (int)floor(x_w_data[ index_y ]);
int xw_ceil = (int)ceil(x_w_data[ index_x ]);
int yw_ceil = (int)ceil(x_w_data[ index_y ]);
theta_data[ index_x ] = x_w_data[ index_x ] - floor(x_w_data[ index_x ]);
theta_data[ index_y ] = x_w_data[ index_y ] - floor(x_w_data[ index_y ]);
if (x_w_data[ index_x ] < 0) {
theta_data[ index_x ] = x_w_data[ index_x ];
xw_floor = 0; xw_ceil = 0;
}
if (x_w_data[ index_x ] >= height_-1) {
theta_data[ index_x ] = x_w_data[ index_x ] - height_;
xw_floor = height_-1; xw_ceil = height_-1;
}
if (x_w_data[ index_y ] < 0) {
theta_data[ index_y ] = x_w_data[ index_y ];
yw_floor = 0; yw_ceil = 0;
}
if (x_w_data[ index_y ] >= width_-1) {
theta_data[ index_y ] = x_w_data[ index_y ] - width_;
yw_floor = width_-1; yw_ceil = width_-1;
}
theta_data_[ index_x ] = 1 - theta_data[ index_x ];
theta_data_[ index_y ] = 1 - theta_data[ index_y ];
int offset = (n * channels_ + c) * height_;
Dtype I0 = bottom_0_data_[ (offset + xw_floor) * width_ + yw_floor ];
Dtype I1 = bottom_0_data_[ (offset + xw_ceil ) * width_ + yw_floor ];
Dtype I2 = bottom_0_data_[ (offset + xw_floor) * width_ + yw_ceil ];
Dtype I3 = bottom_0_data_[ (offset + xw_ceil ) * width_ + yw_ceil ];
top_data[ (offset + h) * width_ + w ] = (theta_data_[index_x] * theta_data_[index_y] * I0) +
(theta_data[index_x] * theta_data_[index_y] * I1) +
(theta_data_[index_x] * theta_data[index_y] * I2) +
(theta_data[index_x] * theta_data[index_y] * I3);
}
}
template <typename Dtype>
__global__ void truncate_interp2_bwd(const int nthreads, const int num_, const int channels_, const int height_,
const int width_, const Dtype *theta_data, const Dtype* theta_data_,
const Dtype *x_w_data, Dtype *bottom_0_diff, Dtype *bottom_1_diff,
const Dtype *top_diff, const Dtype *top_data, const Dtype *bottom_0_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int temp = 0;
const int n = index / (channels_ * height_ * width_);
temp = index % (channels_ * height_ * width_);
const int c = temp / (height_ * width_);
temp = temp % (height_ * width_);
const int h = temp / width_;
const int w = temp % width_;
int index_x = ((n * 2 + 1) * height_ + h) * width_ + w;
int index_y = ((n * 2 + 0) * height_ + h) * width_ + w;
if (!(x_w_data[ index_x ] < 0 || x_w_data[ index_x ] > height_-1 ||
x_w_data[ index_y ] < 0 || x_w_data[ index_y ] > width_-1)) {
int xw_floor = (int)floor(x_w_data[ index_x ]);
int yw_floor = (int)floor(x_w_data[ index_y ]);
int xw_ceil = (int)ceil(x_w_data[ index_x ]);
int yw_ceil = (int)ceil(x_w_data[ index_y ]);
int bottom_0_index = ((n * channels_ + c) * height_ + h) * width_ + w;
int offset = (n * channels_ + c) * height_;
Dtype I0 = bottom_0_data[ (offset + xw_floor) * width_ + yw_floor ];
Dtype I1 = bottom_0_data[ (offset + xw_ceil ) * width_ + yw_floor ];
Dtype I2 = bottom_0_data[ (offset + xw_floor) * width_ + yw_ceil ];
Dtype I3 = bottom_0_data[ (offset + xw_ceil ) * width_ + yw_ceil ];
bottom_1_diff[ index_x ] += ( -1*theta_data_[index_y]*I0 +
theta_data_[index_y]*I1 -
theta_data[index_y] *I2 +
theta_data[index_y] *I3 ) *
top_diff[(offset + h) * width_ + w];
bottom_1_diff[ index_y ] += ( -1*theta_data_[index_x]*I0 -
theta_data[index_x] *I1 +
theta_data_[index_x]*I2 +
theta_data[index_x] *I3 ) *
top_diff[(offset + h) * width_ + w];
caffe_gpu_atomic_add((Dtype) theta_data_[ index_x ]*theta_data_[ index_y ]*top_diff[bottom_0_index],
bottom_0_diff + ((offset + xw_floor) * width_ + yw_floor ));
caffe_gpu_atomic_add((Dtype) theta_data[ index_x ] *theta_data_[ index_y ]*top_diff[bottom_0_index],
bottom_0_diff + ((offset + xw_ceil ) * width_ + yw_floor ));
caffe_gpu_atomic_add((Dtype) theta_data_[ index_x ]*theta_data[ index_y ] *top_diff[bottom_0_index],
bottom_0_diff + ((offset + xw_floor) * width_ + yw_ceil ));
caffe_gpu_atomic_add((Dtype) theta_data[ index_x ] *theta_data[ index_y ] *top_diff[bottom_0_index],
bottom_0_diff + ((offset + xw_ceil ) * width_ + yw_ceil ));
}
}
}
template <typename Dtype>
__global__ void nearest_interp2_bwd(const int nthreads, const int num_, const int channels_, const int height_,
const int width_, const Dtype *theta_data, const Dtype* theta_data_,
const Dtype *x_w_data, Dtype *bottom_0_diff, Dtype *bottom_1_diff,
const Dtype *top_diff, const Dtype *top_data, const Dtype *bottom_0_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int temp = 0;
const int n = index / (channels_ * height_ * width_);
temp = index % (channels_ * height_ * width_);
const int c = temp / (height_ * width_);
temp = temp % (height_ * width_);
const int h = temp / width_;
const int w = temp % width_;
int index_x = ((n * 2 + 1) * height_ + h) * width_ + w;
int index_y = ((n * 2 + 0) * height_ + h) * width_ + w;
int xw_floor = (int)floor(x_w_data[ index_x ]);
int yw_floor = (int)floor(x_w_data[ index_y ]);
int xw_ceil = (int)ceil(x_w_data[ index_x ]);
int yw_ceil = (int)ceil(x_w_data[ index_y ]);
if (x_w_data[ index_x ] < 0) {
xw_floor = 0; xw_ceil = 0;
}
if (x_w_data[ index_x ] >= height_-1) {
xw_floor = height_-1; xw_ceil = height_-1;
}
if (x_w_data[ index_y ] < 0) {
yw_floor = 0; yw_ceil = 0;
}
if (x_w_data[ index_y ] >= width_-1) {
yw_floor = width_-1; yw_ceil = width_-1;
}
int bottom_0_index = ((n * channels_ + c) * height_ + h) * width_ + w;
int offset = (n * channels_ + c) * height_;
Dtype I0 = bottom_0_data[ (offset + xw_floor) * width_ + yw_floor ];
Dtype I1 = bottom_0_data[ (offset + xw_ceil ) * width_ + yw_floor ];
Dtype I2 = bottom_0_data[ (offset + xw_floor) * width_ + yw_ceil ];
Dtype I3 = bottom_0_data[ (offset + xw_ceil ) * width_ + yw_ceil ];
bottom_1_diff[ index_x ] += ( -1*theta_data_[index_y]*I0 +
theta_data_[index_y]*I1 -
theta_data[index_y] *I2 +
theta_data[index_y] *I3 ) *
top_diff[(offset + h) * width_ + w];
bottom_1_diff[ index_y ] += ( -1*theta_data_[index_x]*I0 -
theta_data[index_x] *I1 +
theta_data_[index_x]*I2 +
theta_data[index_x] *I3 ) *
top_diff[(offset + h) * width_ + w];
caffe_gpu_atomic_add((Dtype) theta_data_[ index_x ]*theta_data_[ index_y ]*top_diff[bottom_0_index],
bottom_0_diff + ((offset + xw_floor) * width_ + yw_floor ));
caffe_gpu_atomic_add((Dtype) theta_data[ index_x ] *theta_data_[ index_y ]*top_diff[bottom_0_index],
bottom_0_diff + ((offset + xw_ceil ) * width_ + yw_floor ));
caffe_gpu_atomic_add((Dtype) theta_data_[ index_x ]*theta_data[ index_y ] *top_diff[bottom_0_index],
bottom_0_diff + ((offset + xw_floor) * width_ + yw_ceil ));
caffe_gpu_atomic_add((Dtype) theta_data[ index_x ] *theta_data[ index_y ] *top_diff[bottom_0_index],
bottom_0_diff + ((offset + xw_ceil ) * width_ + yw_ceil ));
}
}
template <typename Dtype>
void WarpLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data_0 = bottom[0]->gpu_data(); // image
const Dtype* bottom_data_1 = bottom[1]->gpu_data(); // optical flow
Dtype* top_data = top[0]->mutable_gpu_data();
Dtype* theta_data = theta.mutable_gpu_data();
Dtype* theta_data_ = theta_.mutable_gpu_data();
Dtype* x_w_data = x_w.mutable_gpu_data();
const int num_kernels = num_ * channels_ * height_ * width_;
caffe_gpu_set(bottom[0]->count(), (Dtype)0., top_data);
switch (outliers_) {
case WarpParameter_WarpType_TRUNCATE:
truncate_interp2_fwd<Dtype><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>
(num_kernels, bottom_data_0, bottom_data_1, num_, channels_, height_, width_,
theta_data, theta_data_, x_w_data, top_data);
break;
case WarpParameter_WarpType_NEAREST:
nearest_interp2_fwd<Dtype><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>
(num_kernels, bottom_data_0, bottom_data_1, num_, channels_, height_, width_,
theta_data, theta_data_, x_w_data, top_data);
break;
}
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
void WarpLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0] || propagate_down[1]) {
caffe_gpu_set(bottom[0]->count(), (Dtype)0., bottom[0]->mutable_gpu_diff());
caffe_gpu_set(bottom[1]->count(), (Dtype)0., bottom[1]->mutable_gpu_diff());
const Dtype* theta_data = theta.mutable_gpu_data();
const Dtype* theta_data_ = theta_.mutable_gpu_data();
const Dtype* x_w_data = x_w.mutable_gpu_data();
const Dtype* top_data = top[0]->gpu_data();
const Dtype* bottom_0_data = bottom[0]->gpu_data();
const Dtype* bottom_1_data = bottom[1]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_0_diff = bottom[0]->mutable_gpu_diff();
Dtype* bottom_1_diff = bottom[1]->mutable_gpu_diff();
const int num_kernels = num_ * channels_ * height_ * width_;
switch (outliers_) {
case WarpParameter_WarpType_NEAREST:
nearest_interp2_bwd<Dtype><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>
(num_kernels, num_, channels_, height_, width_, theta_data, theta_data_, x_w_data,
bottom_0_diff, bottom_1_diff, top_diff, top_data, bottom_0_data);
break;
case WarpParameter_WarpType_TRUNCATE:
truncate_interp2_bwd<Dtype><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>
(num_kernels, num_, channels_, height_, width_, theta_data, theta_data_, x_w_data,
bottom_0_diff, bottom_1_diff, top_diff, top_data, bottom_0_data);
break;
}
CUDA_POST_KERNEL_CHECK;
//caffe_gpu_mul(top[0]->count(), top_diff, bottom_0_diff, bottom_0_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(WarpLayer);
} // namespace caffe
|
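The warp-layer pair above also shows the one mechanical rewrite hipify applies to kernel launches: the CUDA triple-chevron form becomes an explicit hipLaunchKernelGGL call, with the launch dimensions wrapped in dim3 and the shared-memory size and stream arguments made explicit. Schematically (generic names, not taken from the files):

// CUDA source:
kernel<<<grid, block>>>(args);
// hipify output:
hipLaunchKernelGGL((kernel), dim3(grid), dim3(block), 0, 0, args);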
ddcf8f2862f26c03fa9dfd9ed63577ee3fb92894.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THH/generic/THHTensorMathBlas.hip"
#else
#include <ATen/hip/HIPContext.h>
#include <ATen/NamedTensorUtils.h>
#define ERROR_ONLY_FP_TYPES(func) \
THError("%s for CUDA tensors only supports floating-point types. Try converting the tensors with .float()", func);
accreal THCTensor_(dot)(THCState *state, THCTensor *self, THCTensor *src)
{
at::NoNamesGuard guard;
if ( (THTensor_nDimension(self) != 1) || (THTensor_nDimension(src) != 1) ) {
THError("1D tensors expected, got %dD, %dD tensors",
THTensor_nDimension(self), THTensor_nDimension(src));
}
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
THArgCheck(THCTensor_(nElement)(state, self) ==
THCTensor_(nElement)(state, src), 2, "sizes do not match");
self = THCTensor_(newContiguous)(state, self);
src = THCTensor_(newContiguous)(state, src);
#ifdef THC_REAL_IS_FLOAT
accreal result = THCudaBlas_Sdot(state,
THCTensor_(nElement)(state, self),
THCTensor_(data)(state, self), 1,
THCTensor_(data)(state, src), 1);
#elif defined(THC_REAL_IS_DOUBLE)
accreal result = THCudaBlas_Ddot(state,
THCTensor_(nElement)(state, self),
THCTensor_(data)(state, self), 1,
THCTensor_(data)(state, src), 1);
#elif defined(THC_REAL_IS_HALF)
accreal result = THCudaBlas_Hdot(state,
THCTensor_(nElement)(state, self),
THCTensor_(data)(state, self), 1,
THCTensor_(data)(state, src), 1);
#endif
THCTensor_(free)(state, src);
THCTensor_(free)(state, self);
return result;
#else
ERROR_ONLY_FP_TYPES("dot");
return ScalarConvert<int, accreal>::to(0);
#endif
}
static void THCTensor_(addmvImpl)(THCState *state, THCTensor *r_, THCTensor *t, THCTensor *mat, THCTensor *vec, scalar_t beta, scalar_t alpha)
{
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_BFLOAT16)
THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, r_, t, mat, vec));
if( (mat->dim() != 2) || (THTensor_nDimension(vec) != 1) )
THError("2D tensor and 1D tensor expected, got %dD, %dD tensors",
mat->dim(), THTensor_nDimension(vec));
auto vec_size = THTensor_sizeLegacyNoScalars(vec, 0);
auto vec_stride = THTensor_strideLegacyNoScalars(vec, 0);
if( mat->size(1) != THTensor_sizeLegacyNoScalars(vec, 0) )
THError("size mismatch");
if(t->dim() != 1)
THError("size mismatch");
if(THTensor_sizeLegacyNoScalars(t, 0) != mat->size(0))
THError("size mismatch");
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
if(r_ != t)
{
THCTensor_(resizeAs)(state, r_, t);
THCTensor_(copy)(state, r_, t);
}
auto r_stride = THTensor_strideLegacyNoScalars(r_, 0);
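// Column-major mat (stride(0) == 1): call gemv with 'n' and lda = stride(1).
// Row-major mat (stride(1) == 1): call gemv on the transposed view ('t', dims swapped).
// Any other layout: fall back to a contiguous copy of mat first.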
if(mat->stride(0) == 1)
{
#ifdef THC_REAL_IS_FLOAT
THCudaBlas_Sgemv(state, 'n', mat->size(0), mat->size(1),
alpha, THCTensor_(data)(state, mat), mat->stride(1),
THCTensor_(data)(state, vec), vec_stride,
beta, THCTensor_(data)(state, r_), r_stride);
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_Dgemv(state, 'n', mat->size(0), mat->size(1),
alpha, THCTensor_(data)(state, mat), mat->stride(1),
THCTensor_(data)(state, vec), vec_stride,
beta, THCTensor_(data)(state, r_), r_stride);
#endif
}
else if(mat->stride(1) == 1)
{
#ifdef THC_REAL_IS_FLOAT
THCudaBlas_Sgemv(state, 't', mat->size(1), mat->size(0),
alpha, THCTensor_(data)(state, mat), mat->stride(0),
THCTensor_(data)(state, vec), vec_stride,
beta, THCTensor_(data)(state, r_), r_stride);
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_Dgemv(state, 't', mat->size(1), mat->size(0),
alpha, THCTensor_(data)(state, mat), mat->stride(0),
THCTensor_(data)(state, vec), vec_stride,
beta, THCTensor_(data)(state, r_), r_stride);
#endif
}
else
{
THCTensor *cmat = THCTensor_(newContiguous)(state, mat);
#ifdef THC_REAL_IS_FLOAT
THCudaBlas_Sgemv(state, 't', mat->size(1), mat->size(0),
alpha, THCTensor_(data)(state, cmat), cmat->stride(0),
THCTensor_(data)(state, vec), vec_stride,
beta, THCTensor_(data)(state, r_), r_stride);
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_Dgemv(state, 't', mat->size(1), mat->size(0),
alpha, THCTensor_(data)(state, cmat), cmat->stride(0),
THCTensor_(data)(state, vec), vec_stride,
beta, THCTensor_(data)(state, r_), r_stride);
#endif
THCTensor_(free)(state, cmat);
}
// In hipblasSgemv, hipblasDgemv (x,0).mv(0) does not
// handle beta, whereas hipblasSgemm, hipblasDgemm do for case where (x,0).mm(0,y).
if (THTensor_sizeLegacyNoScalars(vec, 0) == 0 && mat->size(0) != 0) {
if(THCNumerics<scalar_t>::eq(beta, ScalarConvert<int, scalar_t>::to(0))) {
THCTensor_(zero)(state, r_);
} else if(THCNumerics<scalar_t>::ne(beta, ScalarConvert<int, scalar_t>::to(1))) {
THCTensor_(mul)(state, r_, r_, beta);
}
}
#elif defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_BFLOAT16)
// Currently no Hgemv/SgemvEx in Cublas
THCTensor *vecAsMatrix = THCTensor_(newWithTensor)(state, vec);
THCTensor_(resize2d)(state, vecAsMatrix, vec_size, 1);
THCTensor *tAsMatrix = THCTensor_(newWithTensor)(state, t);
THCTensor_(resize2d)(state, tAsMatrix, THTensor_sizeLegacyNoScalars(tAsMatrix, 0), 1);
THCTensor_(addmm)(state, r_, tAsMatrix, mat, vecAsMatrix, beta, alpha);
// r_ will have answer as matrix, need to return a vector
THCTensor_(resize1d)(state, r_, THTensor_sizeLegacyNoScalars(r_, 0));
THCTensor_(free)(state, vecAsMatrix);
THCTensor_(free)(state, tAsMatrix);
#endif
#else
ERROR_ONLY_FP_TYPES("addmv");
#endif
}
void THCTensor_(addmv)(THCState *state, THCTensor *r_, THCTensor *t, THCTensor *mat, THCTensor *vec, scalar_t beta, scalar_t alpha) {
{
at::NoNamesGuard guard;
THCTensor_(addmvImpl)(state, r_, t, mat, vec, beta, alpha);
}
at::namedinference::propagate_names_for_addmv(r_, mat, vec, t);
}
void THCTensor_(addr)(THCState *state, THCTensor *r_, THCTensor *t, THCTensor *vec1, THCTensor *vec2, scalar_t beta, scalar_t alpha)
{
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_BFLOAT16)
THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, r_, t, vec1, vec2));
if ( (THTensor_nDimension(vec1) != 1) || (THTensor_nDimension(vec2) != 1) ) {
THError("1D tensors expected, got %dD, %dD tensors",
THTensor_nDimension(vec1), THTensor_nDimension(vec2));
}
auto vec1_size = THTensor_sizeLegacyNoScalars(vec1, 0);
auto vec2_size = THTensor_sizeLegacyNoScalars(vec2, 0);
auto vec1_stride = THTensor_strideLegacyNoScalars(vec1, 0);
auto vec2_stride = THTensor_strideLegacyNoScalars(vec2, 0);
if (t->dim() != 2) {
THError("size mismatch");
}
if ( (t->size(0) != vec1_size) || (t->size(1) != vec2_size) ) {
THError("size mismatch");
}
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
if (r_ != t) {
THCTensor_(resizeAs)(state, r_, t);
THCTensor_(copy)(state, r_, t);
}
if(THCNumerics<scalar_t>::eq(beta, ScalarConvert<int, scalar_t>::to(0))) {
THCTensor_(zero)(state, r_);
} else if(THCNumerics<scalar_t>::ne(beta, ScalarConvert<int, scalar_t>::to(1))) {
THCTensor_(mul)(state, r_, r_, beta);
}
if(r_->stride(0) == 1)
{
#ifdef THC_REAL_IS_FLOAT
THCudaBlas_Sger(state, vec1_size, vec2_size,
alpha, THCTensor_(data)(state, vec1), vec1_stride,
THCTensor_(data)(state, vec2), vec2_stride,
THCTensor_(data)(state, r_), r_->stride(1));
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_Dger(state, vec1_size, vec2_size,
alpha, THCTensor_(data)(state, vec1), vec1_stride,
THCTensor_(data)(state, vec2), vec2_stride,
THCTensor_(data)(state, r_), r_->stride(1));
#endif
}
else if(r_->stride(1) == 1)
{
#ifdef THC_REAL_IS_FLOAT
THCudaBlas_Sger(state, vec2_size, vec1_size,
alpha, THCTensor_(data)(state, vec2), vec2_stride,
THCTensor_(data)(state, vec1), vec1_stride,
THCTensor_(data)(state, r_), r_->stride(0));
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_Dger(state, vec2_size, vec1_size,
alpha, THCTensor_(data)(state, vec2), vec2_stride,
THCTensor_(data)(state, vec1), vec1_stride,
THCTensor_(data)(state, r_), r_->stride(0));
#endif
}
else
{
THCTensor *cr = THCTensor_(newClone)(state, r_);
#ifdef THC_REAL_IS_FLOAT
THCudaBlas_Sger(state, vec2_size, vec1_size,
alpha, THCTensor_(data)(state, vec2), vec2_stride,
THCTensor_(data)(state, vec1), vec1_stride,
THCTensor_(data)(state, cr), cr->stride(0));
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_Dger(state, vec2_size, vec1_size,
alpha, THCTensor_(data)(state, vec2), vec2_stride,
THCTensor_(data)(state, vec1), vec1_stride,
THCTensor_(data)(state, cr), cr->stride(0));
#endif
THCTensor_(freeCopyTo)(state, cr, r_);
}
#elif defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_BFLOAT16)
// currently no Hger/SgerEx in Cublas.
THCTensor *vec2T = THCTensor_(newWithTensor)(state, vec2);
THCTensor_(resize2d)(state, vec2T, vec2_size, 1);
THCTensor_(transpose)(state, vec2T, NULL, 0, 1);
THCTensor *vec1M = THCTensor_(newWithTensor)(state, vec1);
THCTensor_(resize2d)(state, vec1M, vec1_size, 1);
THCTensor_(addmm)(state, r_, t, vec1M, vec2T, beta, alpha);
THCTensor_(free)(state, vec2T);
THCTensor_(free)(state, vec1M);
#endif
#else
ERROR_ONLY_FP_TYPES("addr");
#endif
}
static void THCTensor_(addmmImpl)(THCState *state, THCTensor *r_, THCTensor *t, THCTensor *m1, THCTensor *m2, scalar_t beta, scalar_t alpha)
{
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_BFLOAT16)
THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, r_, t, m1, m2));
char transpose_r, transpose_m1, transpose_m2;
THCTensor *r__, *m1_, *m2_;
if( (m1->dim() != 2) || (m2->dim() != 2) )
THError("2D tensors expected, got %dD, %dD tensors", m1->dim(), m2->dim());
if(t->dim() != 2)
THError("2D tensor expected, got %dD tensor for t", t->dim());
if(m1->size(1) != m2->size(0)) {
THCDescBuff bm1 = THCTensor_(sizeDesc)(state, m1);
THCDescBuff bm2 = THCTensor_(sizeDesc)(state, m2);
THError("size mismatch, m1: %s, m2: %s", bm1.str, bm2.str);
}
if( (t->size(0) != m1->size(0)) || (t->size(1) != m2->size(1)) ) {
THCDescBuff bt = THCTensor_(sizeDesc)(state, t);
THCDescBuff bm1 = THCTensor_(sizeDesc)(state, m1);
THCDescBuff bm2 = THCTensor_(sizeDesc)(state, m2);
THError("size mismatch, t: %s, m1: %s, m2: %s", bt.str, bm1.str, bm2.str);
}
if(t != r_)
{
THCTensor_(resizeAs)(state, r_, t);
if (ScalarConvert<scalar_t, double>::to(beta) != 0.0) {
THCTensor_(copy)(state, r_, t);
}
}
if((r_->size(0) == 0) || (r_->size(1) == 0))
{
return;
}
/* r_ */
if(r_->stride(0) == 1 &&
r_->stride(1) != 0)
{
transpose_r = 'n';
r__ = r_;
}
else if(r_->stride(1) == 1 &&
r_->stride(0) != 0)
{
THCTensor *swap = m2;
m2 = m1;
m1 = swap;
transpose_r = 't';
r__ = r_;
}
else
{
transpose_r = 'n';
THCTensor *transp_r_ = THCTensor_(newTranspose)(state, r_, 0, 1);
r__ = THCTensor_(newClone)(state, transp_r_);
THCTensor_(free)(state, transp_r_);
THCTensor_(transpose)(state, r__, NULL, 0, 1);
}
/* m1 */
if(m1->stride((transpose_r == 'n' ? 0 : 1)) == 1 &&
m1->stride((transpose_r == 'n' ? 1 : 0)) != 0)
{
transpose_m1 = 'n';
m1_ = m1;
}
else if(m1->stride((transpose_r == 'n' ? 1 : 0)) == 1 &&
m1->stride((transpose_r == 'n' ? 0 : 1)) != 0)
{
transpose_m1 = 't';
m1_ = m1;
}
else
{
transpose_m1 = (transpose_r == 'n' ? 't' : 'n');
m1_ = THCTensor_(newContiguous)(state, m1);
}
/* m2 */
if(m2->stride((transpose_r == 'n' ? 0 : 1)) == 1 &&
m2->stride((transpose_r == 'n' ? 1 : 0)) != 0)
{
transpose_m2 = 'n';
m2_ = m2;
}
else if(m2->stride((transpose_r == 'n' ? 1 : 0)) == 1 &&
m2->stride((transpose_r == 'n' ? 0 : 1)) != 0)
{
transpose_m2 = 't';
m2_ = m2;
}
else
{
transpose_m2 = (transpose_r == 'n' ? 't' : 'n');
m2_ = THCTensor_(newContiguous)(state, m2);
}
#ifdef THC_REAL_IS_HALF
THCudaBlas_Hgemm(state,
transpose_m1,
transpose_m2,
r__->size((transpose_r == 'n' ? 0 : 1)),
r__->size((transpose_r == 'n' ? 1 : 0)),
m1_->size((transpose_r == 'n' ? 1 : 0)),
alpha,
THCTensor_(data)(state, m1_),
(transpose_m1 == 'n' ? m1_->stride((transpose_r == 'n' ? 1 : 0)) : m1_->stride((transpose_r == 'n' ? 0 : 1))),
THCTensor_(data)(state, m2_),
(transpose_m2 == 'n' ? m2_->stride((transpose_r == 'n' ? 1 : 0)) : m2_->stride((transpose_r == 'n' ? 0 : 1))),
beta,
THCTensor_(data)(state, r__),
r__->stride((transpose_r == 'n' ? 1 : 0)));
#elif defined(THC_REAL_IS_FLOAT)
THCudaBlas_Sgemm(state,
transpose_m1,
transpose_m2,
r__->size((transpose_r == 'n' ? 0 : 1)),
r__->size((transpose_r == 'n' ? 1 : 0)),
m1_->size((transpose_r == 'n' ? 1 : 0)),
alpha,
THCTensor_(data)(state, m1_),
(transpose_m1 == 'n' ? m1_->stride((transpose_r == 'n' ? 1 : 0)) : m1_->stride((transpose_r == 'n' ? 0 : 1))),
THCTensor_(data)(state, m2_),
(transpose_m2 == 'n' ? m2_->stride((transpose_r == 'n' ? 1 : 0)) : m2_->stride((transpose_r == 'n' ? 0 : 1))),
beta,
THCTensor_(data)(state, r__),
r__->stride((transpose_r == 'n' ? 1 : 0)));
#elif defined(THC_REAL_IS_BFLOAT16)
#if defined(__HIP_PLATFORM_HCC__)
THCudaBlas_Bgemm(state,
transpose_m1,
transpose_m2,
r__->size((transpose_r == 'n' ? 0 : 1)),
r__->size((transpose_r == 'n' ? 1 : 0)),
m1_->size((transpose_r == 'n' ? 1 : 0)),
alpha,
THCTensor_(data)(state, m1_),
(transpose_m1 == 'n' ? m1_->stride((transpose_r == 'n' ? 1 : 0)) : m1_->stride((transpose_r == 'n' ? 0 : 1))),
THCTensor_(data)(state, m2_),
(transpose_m2 == 'n' ? m2_->stride((transpose_r == 'n' ? 1 : 0)) : m2_->stride((transpose_r == 'n' ? 0 : 1))),
beta,
THCTensor_(data)(state, r__),
r__->stride((transpose_r == 'n' ? 1 : 0)));
#endif // __HIP_PLATFORM_HCC__
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_Dgemm(state,
transpose_m1,
transpose_m2,
r__->size((transpose_r == 'n' ? 0 : 1)),
r__->size((transpose_r == 'n' ? 1 : 0)),
m1_->size((transpose_r == 'n' ? 1 : 0)),
alpha,
THCTensor_(data)(state, m1_),
(transpose_m1 == 'n' ? m1_->stride((transpose_r == 'n' ? 1 : 0)) : m1_->stride((transpose_r == 'n' ? 0 : 1))),
THCTensor_(data)(state, m2_),
(transpose_m2 == 'n' ? m2_->stride((transpose_r == 'n' ? 1 : 0)) : m2_->stride((transpose_r == 'n' ? 0 : 1))),
beta,
THCTensor_(data)(state, r__),
r__->stride((transpose_r == 'n' ? 1 : 0)));
#endif
/* free intermediate variables */
if(m1_ != m1) {
THCTensor_(free)(state, m1_);
}
if(m2_ != m2) {
THCTensor_(free)(state, m2_);
}
if(r__ != r_) {
THCTensor_(freeCopyTo)(state, r__, r_);
}
#if defined(THC_REAL_IS_BFLOAT16) && !defined(__HIP_PLATFORM_HCC__)
// To avoid "variable was set but never used" warning
[&transpose_m1, &transpose_m2]{}();
TORCH_CHECK(false, "Bgemm not supported on at::BFloat16 type");
#endif
#else
ERROR_ONLY_FP_TYPES("addmm");
#endif
}
void THCTensor_(addmm)(THCState *state, THCTensor *r_, THCTensor *t, THCTensor *m1, THCTensor *m2, scalar_t beta, scalar_t alpha) {
{
at::NoNamesGuard guard;
THCTensor_(addmmImpl)(state, r_, t, m1, m2, beta, alpha);
}
at::namedinference::propagate_names_for_addmm(r_, m1, m2, t);
}
void THCTensor_(addbmm)(THCState *state, THCTensor *result, THCTensor *t,
THCTensor *batch1, THCTensor *batch2, scalar_t beta, scalar_t alpha) {
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_BFLOAT16)
THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, result, t, batch1, batch2));
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, t) == 2, 4, "expected 2D tensor");
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, batch1) == 3, 6, "expected 3D tensor");
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, batch2) == 3, 7, "expected 3D tensor");
int64_t batchnum = THCTensor_(size)(state, batch1, 0);
int64_t m1d1 = THCTensor_(size)(state, batch1, 1);
int64_t innerdim = THCTensor_(size)(state, batch1, 2);
int64_t m2d2 = THCTensor_(size)(state, batch2, 2);
THArgCheck(batchnum == THCTensor_(size)(state, batch2, 0), 7,
"equal number of batches expected");
// M is t, as listed in the docs under addbmm
THArgCheck(m1d1 == THCTensor_(size)(state, t, 0), 6,
"first dimension must match first dimension of M");
THArgCheck(m2d2 == THCTensor_(size)(state, t, 1), 7,
"second dimension must match second dimension of M");
THArgCheck(innerdim == THCTensor_(size)(state, batch2, 1), 6,
"second dimension must match first dimension of batch2");
if (t != result) {
THCTensor_(resizeAs)(state, result, t);
if (ScalarConvert<scalar_t, double>::to(beta) != 0.0) {
THCTensor_(copy)(state, result, t);
}
}
THCTensor *slice1 = THCTensor_(new)(state);
THCTensor *slice2 = THCTensor_(new)(state);
for (int64_t i=0; i<batchnum; i++) {
THCTensor_(select)(state, slice1, batch1, 0, i);
THCTensor_(select)(state, slice2, batch2, 0, i);
THCTensor_(addmm)(state, result, result, slice1, slice2, beta, alpha);
beta = ScalarConvert<int, scalar_t>::to(1);
}
THCTensor_(free)(state, slice1);
THCTensor_(free)(state, slice2);
#else
ERROR_ONLY_FP_TYPES("addbmm");
#endif
}
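// Helper kernels for the pointer-array batched-GEMM path: each thread stores
// one per-batch matrix pointer (base + idx * stride) into a device buffer.
// Only the three-buffer variant is launched below; createBatchGemmBuffer
// appears unused in this file.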
__global__ void createBatchGemmBuffer(const scalar_t** buffer, scalar_t* data,
int64_t stride, int64_t num_batches) {
const int64_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_batches) {
buffer[idx] = data + idx * stride;
}
}
__global__ void createBatchGemmBuffer3(const scalar_t** buffer1, const scalar_t ** buffer2, const scalar_t ** buffer3, scalar_t* data1,
scalar_t * data2, scalar_t * data3, int64_t stride1, int64_t stride2, int64_t stride3, int64_t num_batches) {
const int64_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_batches) {
buffer1[idx] = data1 + idx * stride1;
buffer2[idx] = data2 + idx * stride2;
buffer3[idx] = data3 + idx * stride3;
}
}
void THCTensor_(baddbmm)(THCState *state, THCTensor *result, THCTensor *t,
THCTensor *batch1, THCTensor *batch2,
scalar_t beta, scalar_t alpha) {
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_BFLOAT16)
THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, result, t, batch1, batch2));
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, t) == 3, 4, "expected 3D tensor");
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, batch1) == 3, 6, "expected 3D tensor");
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, batch2) == 3, 7, "expected 3D tensor");
THArgCheck(THCTensor_(size)(state, t, 0) == THCTensor_(size)(state, batch1, 0), 6,
"equal number of batches expected");
THArgCheck(THCTensor_(size)(state, t, 0) == THCTensor_(size)(state, batch2, 0), 7,
"equal number of batches expected");
auto maybe_outnames = at::namedinference::compute_baddbmm_outnames(result, batch1, batch2, t);
{
at::NoNamesGuard guard;
THArgCheck(THCTensor_(size)(state, t, 1) == THCTensor_(size)(state, batch1, 1), 6,
"wrong matrix size");
THArgCheck(THCTensor_(size)(state, t, 2) == THCTensor_(size)(state, batch2, 2), 7,
"wrong matrix size");
THArgCheck(THCTensor_(size)(state, batch1, 2) == THCTensor_(size)(state, batch2, 1), 6,
"wrong matrix size");
if (t != result) {
THCTensor_(resizeAs)(state, result, t);
if (ScalarConvert<scalar_t, double>::to(beta) != 0.0) {
THCTensor_(copy)(state, result, t);
}
}
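// Choose views of result/batch1/batch2 that column-major GEMM can consume:
// a stride-1 innermost dimension picks 'n' or 't' directly; otherwise the
// batch is copied to contiguous storage. lda/ldb/ldc record the leading
// dimensions passed to the BLAS call, and batch1/batch2 are swapped when
// the result itself is stored transposed.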
bool transpose_result;
char transpose_batch1, transpose_batch2;
int64_t lda, ldb, ldc;
THCTensor *result_, *batch1_, *batch2_;
if (result->stride(1) == 1)
{
transpose_result = false;
result_ = result;
ldc = result_->stride(2);
}
else if (result->stride(2) == 1)
{
transpose_result = true;
THCTensor *swap = batch2;
batch2 = batch1;
batch1 = swap;
result_ = result;
ldc = result_->stride(1);
}
else
{
transpose_result = false;
THCTensor *transp_r_ = THCTensor_(newTranspose)(state, result, 1, 2);
result_ = THCTensor_(newClone)(state, transp_r_);
THCTensor_(free)(state, transp_r_);
THCTensor_(transpose)(state, result_, NULL, 1, 2);
ldc = result_->stride(2);
}
if (batch1->stride(transpose_result ? 2 : 1) == 1 &&
batch1->stride(transpose_result ? 1 : 2) != 0)
{
transpose_batch1 = 'n';
batch1_ = batch1;
lda = batch1_->stride(transpose_result ? 1 : 2);
}
else if (batch1->stride(transpose_result ? 1 : 2) == 1 &&
batch1->stride(transpose_result ? 2 : 1) != 0)
{
transpose_batch1 = 't';
batch1_ = batch1;
lda = batch1_->stride(transpose_result ? 2 : 1);
}
else
{
transpose_batch1 = transpose_result ? 'n' : 't';
// batch1_ is later freed if batch1_ != batch1
if (THCTensor_(isContiguous)(state, batch1)) {
batch1_ = batch1;
} else {
batch1_ = THCTensor_(newContiguous)(state, batch1);
}
lda = batch1_->stride(1);
}
if (batch2->stride(transpose_result ? 2 : 1) == 1 &&
batch2->stride(transpose_result ? 1 : 2) != 0)
{
transpose_batch2 = 'n';
batch2_ = batch2;
ldb = batch2_->stride(transpose_result ? 1 : 2);
}
else if (batch2->stride(transpose_result ? 1 : 2) == 1 &&
batch2->stride(transpose_result ? 2 : 1) != 0)
{
transpose_batch2 = 't';
batch2_ = batch2;
ldb = batch2_->stride(transpose_result ? 2 : 1);
}
else
{
transpose_batch2 = transpose_result ? 'n' : 't';
// batch2_ is later freed if batch2_ != batch2
if (THCTensor_(isContiguous)(state, batch2)) {
batch2_ = batch2;
} else {
batch2_ = THCTensor_(newContiguous)(state, batch2);
}
ldb = batch2_->stride(1);
}
int64_t num_batches = result_->size(0);
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
// Compute pointers to matrices in each batch.
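// On CUDA older than 8.0 (and never on ROCm) there is no strided-batched
// GEMM, so build explicit device arrays of per-batch matrix pointers and
// call the *gemmBatched entry points instead.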
#if TORCH_HIP_VERSION < 8000 && !defined __HIP_PLATFORM_HCC__
size_t matrices_size = num_batches * sizeof(scalar_t*);
// Copy pointers to device.
auto d_matrices1 = static_cast<const scalar_t**>(THCudaMalloc(state, matrices_size));
auto d_matrices2 = static_cast<const scalar_t**>(THCudaMalloc(state, matrices_size));
auto d_result_matrices = static_cast<scalar_t**>(THCudaMalloc(state, matrices_size));
const int64_t block = 512;
const int64_t grid = (num_batches + block - 1) / block;
hipLaunchKernelGGL(( createBatchGemmBuffer3), dim3(grid), dim3(block), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
d_matrices1, d_matrices2, (const scalar_t**)d_result_matrices, THCTensor_(data)(state, batch1_),
THCTensor_(data)(state, batch2_), THCTensor_(data)(state, result_),
batch1_->stride(0), batch2_->stride(0), result_->stride(0), num_batches);
#ifdef THC_REAL_IS_FLOAT
THCudaBlas_SgemmBatched(
state,
transpose_batch1,
transpose_batch2,
result_->size(transpose_result ? 2 : 1),
result_->size(transpose_result ? 1 : 2),
batch1_->size(transpose_result ? 1 : 2),
alpha,
d_matrices1, lda,
d_matrices2, ldb,
beta,
d_result_matrices, ldc,
num_batches);
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_DgemmBatched(
state,
transpose_batch1,
transpose_batch2,
result_->size(transpose_result ? 2 : 1),
result_->size(transpose_result ? 1 : 2),
batch1_->size(transpose_result ? 1 : 2),
alpha,
d_matrices1, lda,
d_matrices2, ldb,
beta,
d_result_matrices, ldc,
num_batches);
#endif //THC_REAL
THCudaFree(state, d_matrices1);
THCudaFree(state, d_matrices2);
THCudaFree(state, d_result_matrices);
#else
#ifdef THC_REAL_IS_FLOAT
THCudaBlas_SgemmStridedBatched(
state,
transpose_batch1,
transpose_batch2,
result_->size(transpose_result ? 2 : 1),
result_->size(transpose_result ? 1 : 2),
batch1_->size(transpose_result ? 1 : 2),
alpha,
THCTensor_(data)(state, batch1_), lda, batch1_->stride(0),
THCTensor_(data)(state, batch2_), ldb, batch2_->stride(0),
beta,
THCTensor_(data)(state, result_), ldc, result_->stride(0),
num_batches);
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_DgemmStridedBatched(
state,
transpose_batch1,
transpose_batch2,
result_->size(transpose_result ? 2 : 1),
result_->size(transpose_result ? 1 : 2),
batch1_->size(transpose_result ? 1 : 2),
alpha,
THCTensor_(data)(state, batch1_), lda, batch1_->stride(0),
THCTensor_(data)(state, batch2_), ldb, batch2_->stride(0),
beta,
THCTensor_(data)(state, result_), ldc, result_->stride(0),
num_batches);
#endif //THC_REAL
#endif //TORCH_HIP_VERSION
#elif defined(THC_REAL_IS_HALF)
#if TORCH_HIP_VERSION < 9010
// Currently no HgemmBatched in Cublas
for (int64_t i = 0; i < num_batches; ++i) {
THCudaBlas_Hgemm(
state,
transpose_batch1,
transpose_batch2,
result_->size(transpose_result ? 2 : 1),
result_->size(transpose_result ? 1 : 2),
batch1_->size(transpose_result ? 1 : 2),
alpha,
THCTensor_(data)(state, batch1_) + i * batch1_->stride(0), lda,
THCTensor_(data)(state, batch2_) + i * batch2_->stride(0), ldb,
beta,
THCTensor_(data)(state, result_) + i * result_->stride(0), ldc);
}
#else
#ifndef __HIP_PLATFORM_HCC__
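// Use the strided-batched fp16 path only on devices with compute capability
// >= 5.0; older GPUs fall back to the per-batch Hgemm loop below. ROCm
// builds skip the capability check entirely.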
hipDeviceProp_t* prop = at::cuda::getCurrentDeviceProperties();
if (prop->major >= 5){
#endif
THCudaBlas_HgemmStridedBatched(
state,
transpose_batch1,
transpose_batch2,
result_->size(transpose_result ? 2 : 1),
result_->size(transpose_result ? 1 : 2),
batch1_->size(transpose_result ? 1 : 2),
alpha,
THCTensor_(data)(state, batch1_), lda, batch1_->stride(0),
THCTensor_(data)(state, batch2_), ldb, batch2_->stride(0),
beta,
THCTensor_(data)(state, result_), ldc, result_->stride(0),
num_batches);
#ifndef __HIP_PLATFORM_HCC__
} else {
for (int64_t i = 0; i < num_batches; ++i) {
THCudaBlas_Hgemm(
state,
transpose_batch1,
transpose_batch2,
result_->size(transpose_result ? 2 : 1),
result_->size(transpose_result ? 1 : 2),
batch1_->size(transpose_result ? 1 : 2),
alpha,
THCTensor_(data)(state, batch1_) + i * batch1_->stride(0), lda,
THCTensor_(data)(state, batch2_) + i * batch2_->stride(0), ldb,
beta,
THCTensor_(data)(state, result_) + i * result_->stride(0), ldc);
}
}
#endif
#endif //TORCH_HIP_VERSION
#elif defined(THC_REAL_IS_BFLOAT16)
#if defined(__HIP_PLATFORM_HCC__)
THCudaBlas_BgemmStridedBatched(
state,
transpose_batch1,
transpose_batch2,
result_->size(transpose_result ? 2 : 1),
result_->size(transpose_result ? 1 : 2),
batch1_->size(transpose_result ? 1 : 2),
alpha,
THCTensor_(data)(state, batch1_), lda, batch1_->stride(0),
THCTensor_(data)(state, batch2_), ldb, batch2_->stride(0),
beta,
THCTensor_(data)(state, result_), ldc, result_->stride(0),
num_batches);
#endif // __HIP_PLATFORM_HCC__
#endif
if (batch1_ != batch1) {
THCTensor_(free)(state, batch1_);
}
if (batch2_ != batch2) {
THCTensor_(free)(state, batch2_);
}
if (result_ != result) {
THCTensor_(freeCopyTo)(state, result_, result);
}
#if defined(THC_REAL_IS_BFLOAT16) && !defined(__HIP_PLATFORM_HCC__)
// To avoid "variable was set but never used" warning
[&transpose_batch1, &transpose_batch2, &lda, &ldb, &ldc]{}();
TORCH_CHECK(false, "BgemmStridedBatched is not supported with at::BFloat16 type");
#endif
}
#if !defined(THC_REAL_IS_BFLOAT16) || defined(__HIP_PLATFORM_HCC__)
at::namedinference::propagate_names_if_nonempty(result, maybe_outnames);
#endif
#else
ERROR_ONLY_FP_TYPES("baddbmm");
#endif
}
#endif
|
ddcf8f2862f26c03fa9dfd9ed63577ee3fb92894.cu
|
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THC/generic/THCTensorMathBlas.cu"
#else
#include <ATen/cuda/CUDAContext.h>
#include <ATen/NamedTensorUtils.h>
#define ERROR_ONLY_FP_TYPES(func) \
THError("%s for CUDA tensors only supports floating-point types. Try converting the tensors with .float()", func);
accreal THCTensor_(dot)(THCState *state, THCTensor *self, THCTensor *src)
{
at::NoNamesGuard guard;
if ( (THTensor_nDimension(self) != 1) || (THTensor_nDimension(src) != 1) ) {
THError("1D tensors expected, got %dD, %dD tensors",
THTensor_nDimension(self), THTensor_nDimension(src));
}
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
THArgCheck(THCTensor_(nElement)(state, self) ==
THCTensor_(nElement)(state, src), 2, "sizes do not match");
self = THCTensor_(newContiguous)(state, self);
src = THCTensor_(newContiguous)(state, src);
#ifdef THC_REAL_IS_FLOAT
accreal result = THCudaBlas_Sdot(state,
THCTensor_(nElement)(state, self),
THCTensor_(data)(state, self), 1,
THCTensor_(data)(state, src), 1);
#elif defined(THC_REAL_IS_DOUBLE)
accreal result = THCudaBlas_Ddot(state,
THCTensor_(nElement)(state, self),
THCTensor_(data)(state, self), 1,
THCTensor_(data)(state, src), 1);
#elif defined(THC_REAL_IS_HALF)
accreal result = THCudaBlas_Hdot(state,
THCTensor_(nElement)(state, self),
THCTensor_(data)(state, self), 1,
THCTensor_(data)(state, src), 1);
#endif
THCTensor_(free)(state, src);
THCTensor_(free)(state, self);
return result;
#else
ERROR_ONLY_FP_TYPES("dot");
return ScalarConvert<int, accreal>::to(0);
#endif
}
static void THCTensor_(addmvImpl)(THCState *state, THCTensor *r_, THCTensor *t, THCTensor *mat, THCTensor *vec, scalar_t beta, scalar_t alpha)
{
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_BFLOAT16)
THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, r_, t, mat, vec));
if( (mat->dim() != 2) || (THTensor_nDimension(vec) != 1) )
THError("2D tensor and 1D tensor expected, got %dD, %dD tensors",
mat->dim(), THTensor_nDimension(vec));
auto vec_size = THTensor_sizeLegacyNoScalars(vec, 0);
auto vec_stride = THTensor_strideLegacyNoScalars(vec, 0);
if( mat->size(1) != THTensor_sizeLegacyNoScalars(vec, 0) )
THError("size mismatch");
if(t->dim() != 1)
THError("size mismatch");
if(THTensor_sizeLegacyNoScalars(t, 0) != mat->size(0))
THError("size mismatch");
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
if(r_ != t)
{
THCTensor_(resizeAs)(state, r_, t);
THCTensor_(copy)(state, r_, t);
}
auto r_stride = THTensor_strideLegacyNoScalars(r_, 0);
if(mat->stride(0) == 1)
{
#ifdef THC_REAL_IS_FLOAT
THCudaBlas_Sgemv(state, 'n', mat->size(0), mat->size(1),
alpha, THCTensor_(data)(state, mat), mat->stride(1),
THCTensor_(data)(state, vec), vec_stride,
beta, THCTensor_(data)(state, r_), r_stride);
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_Dgemv(state, 'n', mat->size(0), mat->size(1),
alpha, THCTensor_(data)(state, mat), mat->stride(1),
THCTensor_(data)(state, vec), vec_stride,
beta, THCTensor_(data)(state, r_), r_stride);
#endif
}
else if(mat->stride(1) == 1)
{
#ifdef THC_REAL_IS_FLOAT
THCudaBlas_Sgemv(state, 't', mat->size(1), mat->size(0),
alpha, THCTensor_(data)(state, mat), mat->stride(0),
THCTensor_(data)(state, vec), vec_stride,
beta, THCTensor_(data)(state, r_), r_stride);
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_Dgemv(state, 't', mat->size(1), mat->size(0),
alpha, THCTensor_(data)(state, mat), mat->stride(0),
THCTensor_(data)(state, vec), vec_stride,
beta, THCTensor_(data)(state, r_), r_stride);
#endif
}
else
{
THCTensor *cmat = THCTensor_(newContiguous)(state, mat);
#ifdef THC_REAL_IS_FLOAT
THCudaBlas_Sgemv(state, 't', mat->size(1), mat->size(0),
alpha, THCTensor_(data)(state, cmat), cmat->stride(0),
THCTensor_(data)(state, vec), vec_stride,
beta, THCTensor_(data)(state, r_), r_stride);
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_Dgemv(state, 't', mat->size(1), mat->size(0),
alpha, THCTensor_(data)(state, cmat), cmat->stride(0),
THCTensor_(data)(state, vec), vec_stride,
beta, THCTensor_(data)(state, r_), r_stride);
#endif
THCTensor_(free)(state, cmat);
}
// cublasSgemv/cublasDgemv do not apply beta for the degenerate (x,0).mv(0)
// case, whereas cublasSgemm/cublasDgemm do handle (x,0).mm(0,y);
// apply beta to the result manually here.
if (THTensor_sizeLegacyNoScalars(vec, 0) == 0 && mat->size(0) != 0) {
if(THCNumerics<scalar_t>::eq(beta, ScalarConvert<int, scalar_t>::to(0))) {
THCTensor_(zero)(state, r_);
} else if(THCNumerics<scalar_t>::ne(beta, ScalarConvert<int, scalar_t>::to(1))) {
THCTensor_(mul)(state, r_, r_, beta);
}
}
#elif defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_BFLOAT16)
// Currently no Hgemv/SgemvEx in Cublas
THCTensor *vecAsMatrix = THCTensor_(newWithTensor)(state, vec);
THCTensor_(resize2d)(state, vecAsMatrix, vec_size, 1);
THCTensor *tAsMatrix = THCTensor_(newWithTensor)(state, t);
THCTensor_(resize2d)(state, tAsMatrix, THTensor_sizeLegacyNoScalars(tAsMatrix, 0), 1);
THCTensor_(addmm)(state, r_, tAsMatrix, mat, vecAsMatrix, beta, alpha);
// r_ will have answer as matrix, need to return a vector
THCTensor_(resize1d)(state, r_, THTensor_sizeLegacyNoScalars(r_, 0));
THCTensor_(free)(state, vecAsMatrix);
THCTensor_(free)(state, tAsMatrix);
#endif
#else
ERROR_ONLY_FP_TYPES("addmv");
#endif
}
void THCTensor_(addmv)(THCState *state, THCTensor *r_, THCTensor *t, THCTensor *mat, THCTensor *vec, scalar_t beta, scalar_t alpha) {
{
at::NoNamesGuard guard;
THCTensor_(addmvImpl)(state, r_, t, mat, vec, beta, alpha);
}
at::namedinference::propagate_names_for_addmv(r_, mat, vec, t);
}
void THCTensor_(addr)(THCState *state, THCTensor *r_, THCTensor *t, THCTensor *vec1, THCTensor *vec2, scalar_t beta, scalar_t alpha)
{
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_BFLOAT16)
THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, r_, t, vec1, vec2));
if ( (THTensor_nDimension(vec1) != 1) || (THTensor_nDimension(vec2) != 1) ) {
THError("1D tensors expected, got %dD, %dD tensors",
THTensor_nDimension(vec1), THTensor_nDimension(vec2));
}
auto vec1_size = THTensor_sizeLegacyNoScalars(vec1, 0);
auto vec2_size = THTensor_sizeLegacyNoScalars(vec2, 0);
auto vec1_stride = THTensor_strideLegacyNoScalars(vec1, 0);
auto vec2_stride = THTensor_strideLegacyNoScalars(vec2, 0);
if (t->dim() != 2) {
THError("size mismatch");
}
if ( (t->size(0) != vec1_size) || (t->size(1) != vec2_size) ) {
THError("size mismatch");
}
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
if (r_ != t) {
THCTensor_(resizeAs)(state, r_, t);
THCTensor_(copy)(state, r_, t);
}
if(THCNumerics<scalar_t>::eq(beta, ScalarConvert<int, scalar_t>::to(0))) {
THCTensor_(zero)(state, r_);
} else if(THCNumerics<scalar_t>::ne(beta, ScalarConvert<int, scalar_t>::to(1))) {
THCTensor_(mul)(state, r_, r_, beta);
}
if(r_->stride(0) == 1)
{
#ifdef THC_REAL_IS_FLOAT
THCudaBlas_Sger(state, vec1_size, vec2_size,
alpha, THCTensor_(data)(state, vec1), vec1_stride,
THCTensor_(data)(state, vec2), vec2_stride,
THCTensor_(data)(state, r_), r_->stride(1));
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_Dger(state, vec1_size, vec2_size,
alpha, THCTensor_(data)(state, vec1), vec1_stride,
THCTensor_(data)(state, vec2), vec2_stride,
THCTensor_(data)(state, r_), r_->stride(1));
#endif
}
else if(r_->stride(1) == 1)
{
#ifdef THC_REAL_IS_FLOAT
THCudaBlas_Sger(state, vec2_size, vec1_size,
alpha, THCTensor_(data)(state, vec2), vec2_stride,
THCTensor_(data)(state, vec1), vec1_stride,
THCTensor_(data)(state, r_), r_->stride(0));
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_Dger(state, vec2_size, vec1_size,
alpha, THCTensor_(data)(state, vec2), vec2_stride,
THCTensor_(data)(state, vec1), vec1_stride,
THCTensor_(data)(state, r_), r_->stride(0));
#endif
}
else
{
THCTensor *cr = THCTensor_(newClone)(state, r_);
#ifdef THC_REAL_IS_FLOAT
THCudaBlas_Sger(state, vec2_size, vec1_size,
alpha, THCTensor_(data)(state, vec2), vec2_stride,
THCTensor_(data)(state, vec1), vec1_stride,
THCTensor_(data)(state, cr), cr->stride(0));
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_Dger(state, vec2_size, vec1_size,
alpha, THCTensor_(data)(state, vec2), vec2_stride,
THCTensor_(data)(state, vec1), vec1_stride,
THCTensor_(data)(state, cr), cr->stride(0));
#endif
THCTensor_(freeCopyTo)(state, cr, r_);
}
#elif defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_BFLOAT16)
// currently no Hger/SgerEx in Cublas.
THCTensor *vec2T = THCTensor_(newWithTensor)(state, vec2);
THCTensor_(resize2d)(state, vec2T, vec2_size, 1);
THCTensor_(transpose)(state, vec2T, NULL, 0, 1);
THCTensor *vec1M = THCTensor_(newWithTensor)(state, vec1);
THCTensor_(resize2d)(state, vec1M, vec1_size, 1);
THCTensor_(addmm)(state, r_, t, vec1M, vec2T, beta, alpha);
THCTensor_(free)(state, vec2T);
THCTensor_(free)(state, vec1M);
#endif
#else
ERROR_ONLY_FP_TYPES("addr");
#endif
}
static void THCTensor_(addmmImpl)(THCState *state, THCTensor *r_, THCTensor *t, THCTensor *m1, THCTensor *m2, scalar_t beta, scalar_t alpha)
{
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_BFLOAT16)
THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, r_, t, m1, m2));
char transpose_r, transpose_m1, transpose_m2;
THCTensor *r__, *m1_, *m2_;
if( (m1->dim() != 2) || (m2->dim() != 2) )
THError("2D tensors expected, got %dD, %dD tensors", m1->dim(), m2->dim());
if(t->dim() != 2)
THError("2D tensor expected, got %dD tensor for t", t->dim());
if(m1->size(1) != m2->size(0)) {
THCDescBuff bm1 = THCTensor_(sizeDesc)(state, m1);
THCDescBuff bm2 = THCTensor_(sizeDesc)(state, m2);
THError("size mismatch, m1: %s, m2: %s", bm1.str, bm2.str);
}
if( (t->size(0) != m1->size(0)) || (t->size(1) != m2->size(1)) ) {
THCDescBuff bt = THCTensor_(sizeDesc)(state, t);
THCDescBuff bm1 = THCTensor_(sizeDesc)(state, m1);
THCDescBuff bm2 = THCTensor_(sizeDesc)(state, m2);
THError("size mismatch, t: %s, m1: %s, m2: %s", bt.str, bm1.str, bm2.str);
}
if(t != r_)
{
THCTensor_(resizeAs)(state, r_, t);
if (ScalarConvert<scalar_t, double>::to(beta) != 0.0) {
THCTensor_(copy)(state, r_, t);
}
}
if((r_->size(0) == 0) || (r_->size(1) == 0))
{
return;
}
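/* Map r_ = beta*t + alpha*m1*m2 onto a column-major GEMM call: choose
   transpose flags so each operand is read along a stride-1 dimension,
   swapping m1/m2 when r_ is row-major, and cloning to contiguous storage
   only as a last resort. */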
/* r_ */
if(r_->stride(0) == 1 &&
r_->stride(1) != 0)
{
transpose_r = 'n';
r__ = r_;
}
else if(r_->stride(1) == 1 &&
r_->stride(0) != 0)
{
THCTensor *swap = m2;
m2 = m1;
m1 = swap;
transpose_r = 't';
r__ = r_;
}
else
{
transpose_r = 'n';
THCTensor *transp_r_ = THCTensor_(newTranspose)(state, r_, 0, 1);
r__ = THCTensor_(newClone)(state, transp_r_);
THCTensor_(free)(state, transp_r_);
THCTensor_(transpose)(state, r__, NULL, 0, 1);
}
/* m1 */
if(m1->stride((transpose_r == 'n' ? 0 : 1)) == 1 &&
m1->stride((transpose_r == 'n' ? 1 : 0)) != 0)
{
transpose_m1 = 'n';
m1_ = m1;
}
else if(m1->stride((transpose_r == 'n' ? 1 : 0)) == 1 &&
m1->stride((transpose_r == 'n' ? 0 : 1)) != 0)
{
transpose_m1 = 't';
m1_ = m1;
}
else
{
transpose_m1 = (transpose_r == 'n' ? 't' : 'n');
m1_ = THCTensor_(newContiguous)(state, m1);
}
/* m2 */
if(m2->stride((transpose_r == 'n' ? 0 : 1)) == 1 &&
m2->stride((transpose_r == 'n' ? 1 : 0)) != 0)
{
transpose_m2 = 'n';
m2_ = m2;
}
else if(m2->stride((transpose_r == 'n' ? 1 : 0)) == 1 &&
m2->stride((transpose_r == 'n' ? 0 : 1)) != 0)
{
transpose_m2 = 't';
m2_ = m2;
}
else
{
transpose_m2 = (transpose_r == 'n' ? 't' : 'n');
m2_ = THCTensor_(newContiguous)(state, m2);
}
#ifdef THC_REAL_IS_HALF
THCudaBlas_Hgemm(state,
transpose_m1,
transpose_m2,
r__->size((transpose_r == 'n' ? 0 : 1)),
r__->size((transpose_r == 'n' ? 1 : 0)),
m1_->size((transpose_r == 'n' ? 1 : 0)),
alpha,
THCTensor_(data)(state, m1_),
(transpose_m1 == 'n' ? m1_->stride((transpose_r == 'n' ? 1 : 0)) : m1_->stride((transpose_r == 'n' ? 0 : 1))),
THCTensor_(data)(state, m2_),
(transpose_m2 == 'n' ? m2_->stride((transpose_r == 'n' ? 1 : 0)) : m2_->stride((transpose_r == 'n' ? 0 : 1))),
beta,
THCTensor_(data)(state, r__),
r__->stride((transpose_r == 'n' ? 1 : 0)));
#elif defined(THC_REAL_IS_FLOAT)
THCudaBlas_Sgemm(state,
transpose_m1,
transpose_m2,
r__->size((transpose_r == 'n' ? 0 : 1)),
r__->size((transpose_r == 'n' ? 1 : 0)),
m1_->size((transpose_r == 'n' ? 1 : 0)),
alpha,
THCTensor_(data)(state, m1_),
(transpose_m1 == 'n' ? m1_->stride((transpose_r == 'n' ? 1 : 0)) : m1_->stride((transpose_r == 'n' ? 0 : 1))),
THCTensor_(data)(state, m2_),
(transpose_m2 == 'n' ? m2_->stride((transpose_r == 'n' ? 1 : 0)) : m2_->stride((transpose_r == 'n' ? 0 : 1))),
beta,
THCTensor_(data)(state, r__),
r__->stride((transpose_r == 'n' ? 1 : 0)));
#elif defined(THC_REAL_IS_BFLOAT16)
#if defined(__HIP_PLATFORM_HCC__)
THCudaBlas_Bgemm(state,
transpose_m1,
transpose_m2,
r__->size((transpose_r == 'n' ? 0 : 1)),
r__->size((transpose_r == 'n' ? 1 : 0)),
m1_->size((transpose_r == 'n' ? 1 : 0)),
alpha,
THCTensor_(data)(state, m1_),
(transpose_m1 == 'n' ? m1_->stride((transpose_r == 'n' ? 1 : 0)) : m1_->stride((transpose_r == 'n' ? 0 : 1))),
THCTensor_(data)(state, m2_),
(transpose_m2 == 'n' ? m2_->stride((transpose_r == 'n' ? 1 : 0)) : m2_->stride((transpose_r == 'n' ? 0 : 1))),
beta,
THCTensor_(data)(state, r__),
r__->stride((transpose_r == 'n' ? 1 : 0)));
#endif // __HIP_PLATFORM_HCC__
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_Dgemm(state,
transpose_m1,
transpose_m2,
r__->size((transpose_r == 'n' ? 0 : 1)),
r__->size((transpose_r == 'n' ? 1 : 0)),
m1_->size((transpose_r == 'n' ? 1 : 0)),
alpha,
THCTensor_(data)(state, m1_),
(transpose_m1 == 'n' ? m1_->stride((transpose_r == 'n' ? 1 : 0)) : m1_->stride((transpose_r == 'n' ? 0 : 1))),
THCTensor_(data)(state, m2_),
(transpose_m2 == 'n' ? m2_->stride((transpose_r == 'n' ? 1 : 0)) : m2_->stride((transpose_r == 'n' ? 0 : 1))),
beta,
THCTensor_(data)(state, r__),
r__->stride((transpose_r == 'n' ? 1 : 0)));
#endif
/* free intermediate variables */
if(m1_ != m1) {
THCTensor_(free)(state, m1_);
}
if(m2_ != m2) {
THCTensor_(free)(state, m2_);
}
if(r__ != r_) {
THCTensor_(freeCopyTo)(state, r__, r_);
}
#if defined(THC_REAL_IS_BFLOAT16) && !defined(__HIP_PLATFORM_HCC__)
// To avoid "variable was set but never used" warning
[&transpose_m1, &transpose_m2]{}();
TORCH_CHECK(false, "Bgemm not supported on at::BFloat16 type");
#endif
#else
ERROR_ONLY_FP_TYPES("addmm");
#endif
}
void THCTensor_(addmm)(THCState *state, THCTensor *r_, THCTensor *t, THCTensor *m1, THCTensor *m2, scalar_t beta, scalar_t alpha) {
{
at::NoNamesGuard guard;
THCTensor_(addmmImpl)(state, r_, t, m1, m2, beta, alpha);
}
at::namedinference::propagate_names_for_addmm(r_, m1, m2, t);
}
void THCTensor_(addbmm)(THCState *state, THCTensor *result, THCTensor *t,
THCTensor *batch1, THCTensor *batch2, scalar_t beta, scalar_t alpha) {
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_BFLOAT16)
THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, result, t, batch1, batch2));
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, t) == 2, 4, "expected 2D tensor");
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, batch1) == 3, 6, "expected 3D tensor");
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, batch2) == 3, 7, "expected 3D tensor");
int64_t batchnum = THCTensor_(size)(state, batch1, 0);
int64_t m1d1 = THCTensor_(size)(state, batch1, 1);
int64_t innerdim = THCTensor_(size)(state, batch1, 2);
int64_t m2d2 = THCTensor_(size)(state, batch2, 2);
THArgCheck(batchnum == THCTensor_(size)(state, batch2, 0), 7,
"equal number of batches expected");
// M is t, as listed in the docs under addbmm
THArgCheck(m1d1 == THCTensor_(size)(state, t, 0), 6,
"first dimension must match first dimension of M");
THArgCheck(m2d2 == THCTensor_(size)(state, t, 1), 7,
"second dimension must match second dimension of M");
THArgCheck(innerdim == THCTensor_(size)(state, batch2, 1), 6,
"second dimension must match first dimension of batch2");
if (t != result) {
THCTensor_(resizeAs)(state, result, t);
if (ScalarConvert<scalar_t, double>::to(beta) != 0.0) {
THCTensor_(copy)(state, result, t);
}
}
THCTensor *slice1 = THCTensor_(new)(state);
THCTensor *slice2 = THCTensor_(new)(state);
for (int64_t i=0; i<batchnum; i++) {
THCTensor_(select)(state, slice1, batch1, 0, i);
THCTensor_(select)(state, slice2, batch2, 0, i);
THCTensor_(addmm)(state, result, result, slice1, slice2, beta, alpha);
beta = ScalarConvert<int, scalar_t>::to(1);
}
THCTensor_(free)(state, slice1);
THCTensor_(free)(state, slice2);
#else
ERROR_ONLY_FP_TYPES("addbmm");
#endif
}
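// The two kernels below precompute per-batch matrix pointers (base + idx *
// stride) for the cublas*gemmBatched APIs; baddbmm only launches the
// three-buffer variant.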
__global__ void createBatchGemmBuffer(const scalar_t** buffer, scalar_t* data,
int64_t stride, int64_t num_batches) {
const int64_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_batches) {
buffer[idx] = data + idx * stride;
}
}
__global__ void createBatchGemmBuffer3(const scalar_t** buffer1, const scalar_t ** buffer2, const scalar_t ** buffer3, scalar_t* data1,
scalar_t * data2, scalar_t * data3, int64_t stride1, int64_t stride2, int64_t stride3, int64_t num_batches) {
const int64_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_batches) {
buffer1[idx] = data1 + idx * stride1;
buffer2[idx] = data2 + idx * stride2;
buffer3[idx] = data3 + idx * stride3;
}
}
void THCTensor_(baddbmm)(THCState *state, THCTensor *result, THCTensor *t,
THCTensor *batch1, THCTensor *batch2,
scalar_t beta, scalar_t alpha) {
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_BFLOAT16)
THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, result, t, batch1, batch2));
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, t) == 3, 4, "expected 3D tensor");
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, batch1) == 3, 6, "expected 3D tensor");
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, batch2) == 3, 7, "expected 3D tensor");
THArgCheck(THCTensor_(size)(state, t, 0) == THCTensor_(size)(state, batch1, 0), 6,
"equal number of batches expected");
THArgCheck(THCTensor_(size)(state, t, 0) == THCTensor_(size)(state, batch2, 0), 7,
"equal number of batches expected");
auto maybe_outnames = at::namedinference::compute_baddbmm_outnames(result, batch1, batch2, t);
{
at::NoNamesGuard guard;
THArgCheck(THCTensor_(size)(state, t, 1) == THCTensor_(size)(state, batch1, 1), 6,
"wrong matrix size");
THArgCheck(THCTensor_(size)(state, t, 2) == THCTensor_(size)(state, batch2, 2), 7,
"wrong matrix size");
THArgCheck(THCTensor_(size)(state, batch1, 2) == THCTensor_(size)(state, batch2, 1), 6,
"wrong matrix size");
if (t != result) {
THCTensor_(resizeAs)(state, result, t);
if (ScalarConvert<scalar_t, double>::to(beta) != 0.0) {
THCTensor_(copy)(state, result, t);
}
}
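// Layout selection mirrors addmm: find a view of each operand whose
// innermost dimension has stride 1 (choosing 'n' or 't'), swap
// batch1/batch2 when the result is stored transposed, and fall back to a
// contiguous copy otherwise; lda/ldb/ldc are the leading dimensions handed
// to the batched GEMM.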
bool transpose_result;
char transpose_batch1, transpose_batch2;
int64_t lda, ldb, ldc;
THCTensor *result_, *batch1_, *batch2_;
if (result->stride(1) == 1)
{
transpose_result = false;
result_ = result;
ldc = result_->stride(2);
}
else if (result->stride(2) == 1)
{
transpose_result = true;
THCTensor *swap = batch2;
batch2 = batch1;
batch1 = swap;
result_ = result;
ldc = result_->stride(1);
}
else
{
transpose_result = false;
THCTensor *transp_r_ = THCTensor_(newTranspose)(state, result, 1, 2);
result_ = THCTensor_(newClone)(state, transp_r_);
THCTensor_(free)(state, transp_r_);
THCTensor_(transpose)(state, result_, NULL, 1, 2);
ldc = result_->stride(2);
}
if (batch1->stride(transpose_result ? 2 : 1) == 1 &&
batch1->stride(transpose_result ? 1 : 2) != 0)
{
transpose_batch1 = 'n';
batch1_ = batch1;
lda = batch1_->stride(transpose_result ? 1 : 2);
}
else if (batch1->stride(transpose_result ? 1 : 2) == 1 &&
batch1->stride(transpose_result ? 2 : 1) != 0)
{
transpose_batch1 = 't';
batch1_ = batch1;
lda = batch1_->stride(transpose_result ? 2 : 1);
}
else
{
transpose_batch1 = transpose_result ? 'n' : 't';
// batch1_ is later freed if batch1_ != batch1
if (THCTensor_(isContiguous)(state, batch1)) {
batch1_ = batch1;
} else {
batch1_ = THCTensor_(newContiguous)(state, batch1);
}
lda = batch1_->stride(1);
}
if (batch2->stride(transpose_result ? 2 : 1) == 1 &&
batch2->stride(transpose_result ? 1 : 2) != 0)
{
transpose_batch2 = 'n';
batch2_ = batch2;
ldb = batch2_->stride(transpose_result ? 1 : 2);
}
else if (batch2->stride(transpose_result ? 1 : 2) == 1 &&
batch2->stride(transpose_result ? 2 : 1) != 0)
{
transpose_batch2 = 't';
batch2_ = batch2;
ldb = batch2_->stride(transpose_result ? 2 : 1);
}
else
{
transpose_batch2 = transpose_result ? 'n' : 't';
// batch2_ is later freed if batch2_ != batch2
if (THCTensor_(isContiguous)(state, batch2)) {
batch2_ = batch2;
} else {
batch2_ = THCTensor_(newContiguous)(state, batch2);
}
ldb = batch2_->stride(1);
}
int64_t num_batches = result_->size(0);
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
// Compute pointers to matrices in each batch.
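// CUDA < 8.0 lacks the strided-batched GEMM entry points, so explicit
// device arrays of per-batch matrix pointers are built for *gemmBatched.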
#if CUDA_VERSION < 8000 && !defined __HIP_PLATFORM_HCC__
size_t matrices_size = num_batches * sizeof(scalar_t*);
// Copy pointers to device.
auto d_matrices1 = static_cast<const scalar_t**>(THCudaMalloc(state, matrices_size));
auto d_matrices2 = static_cast<const scalar_t**>(THCudaMalloc(state, matrices_size));
auto d_result_matrices = static_cast<scalar_t**>(THCudaMalloc(state, matrices_size));
const int64_t block = 512;
const int64_t grid = (num_batches + block - 1) / block;
createBatchGemmBuffer3<<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>(
d_matrices1, d_matrices2, (const scalar_t**)d_result_matrices, THCTensor_(data)(state, batch1_),
THCTensor_(data)(state, batch2_), THCTensor_(data)(state, result_),
batch1_->stride(0), batch2_->stride(0), result_->stride(0), num_batches);
#ifdef THC_REAL_IS_FLOAT
THCudaBlas_SgemmBatched(
state,
transpose_batch1,
transpose_batch2,
result_->size(transpose_result ? 2 : 1),
result_->size(transpose_result ? 1 : 2),
batch1_->size(transpose_result ? 1 : 2),
alpha,
d_matrices1, lda,
d_matrices2, ldb,
beta,
d_result_matrices, ldc,
num_batches);
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_DgemmBatched(
state,
transpose_batch1,
transpose_batch2,
result_->size(transpose_result ? 2 : 1),
result_->size(transpose_result ? 1 : 2),
batch1_->size(transpose_result ? 1 : 2),
alpha,
d_matrices1, lda,
d_matrices2, ldb,
beta,
d_result_matrices, ldc,
num_batches);
#endif //THC_REAL
THCudaFree(state, d_matrices1);
THCudaFree(state, d_matrices2);
THCudaFree(state, d_result_matrices);
#else
#ifdef THC_REAL_IS_FLOAT
THCudaBlas_SgemmStridedBatched(
state,
transpose_batch1,
transpose_batch2,
result_->size(transpose_result ? 2 : 1),
result_->size(transpose_result ? 1 : 2),
batch1_->size(transpose_result ? 1 : 2),
alpha,
THCTensor_(data)(state, batch1_), lda, batch1_->stride(0),
THCTensor_(data)(state, batch2_), ldb, batch2_->stride(0),
beta,
THCTensor_(data)(state, result_), ldc, result_->stride(0),
num_batches);
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_DgemmStridedBatched(
state,
transpose_batch1,
transpose_batch2,
result_->size(transpose_result ? 2 : 1),
result_->size(transpose_result ? 1 : 2),
batch1_->size(transpose_result ? 1 : 2),
alpha,
THCTensor_(data)(state, batch1_), lda, batch1_->stride(0),
THCTensor_(data)(state, batch2_), ldb, batch2_->stride(0),
beta,
THCTensor_(data)(state, result_), ldc, result_->stride(0),
num_batches);
#endif //THC_REAL
#endif //CUDA_VERSION
#elif defined(THC_REAL_IS_HALF)
#if CUDA_VERSION < 9010
// Currently no HgemmBatched in Cublas
for (int64_t i = 0; i < num_batches; ++i) {
THCudaBlas_Hgemm(
state,
transpose_batch1,
transpose_batch2,
result_->size(transpose_result ? 2 : 1),
result_->size(transpose_result ? 1 : 2),
batch1_->size(transpose_result ? 1 : 2),
alpha,
THCTensor_(data)(state, batch1_) + i * batch1_->stride(0), lda,
THCTensor_(data)(state, batch2_) + i * batch2_->stride(0), ldb,
beta,
THCTensor_(data)(state, result_) + i * result_->stride(0), ldc);
}
#else
#ifndef __HIP_PLATFORM_HCC__
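// The fp16 strided-batched path is gated on compute capability >= 5.0;
// pre-Maxwell devices take the per-batch Hgemm loop in the else branch.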
cudaDeviceProp* prop = at::cuda::getCurrentDeviceProperties();
if (prop->major >= 5){
#endif
THCudaBlas_HgemmStridedBatched(
state,
transpose_batch1,
transpose_batch2,
result_->size(transpose_result ? 2 : 1),
result_->size(transpose_result ? 1 : 2),
batch1_->size(transpose_result ? 1 : 2),
alpha,
THCTensor_(data)(state, batch1_), lda, batch1_->stride(0),
THCTensor_(data)(state, batch2_), ldb, batch2_->stride(0),
beta,
THCTensor_(data)(state, result_), ldc, result_->stride(0),
num_batches);
#ifndef __HIP_PLATFORM_HCC__
} else {
for (int64_t i = 0; i < num_batches; ++i) {
THCudaBlas_Hgemm(
state,
transpose_batch1,
transpose_batch2,
result_->size(transpose_result ? 2 : 1),
result_->size(transpose_result ? 1 : 2),
batch1_->size(transpose_result ? 1 : 2),
alpha,
THCTensor_(data)(state, batch1_) + i * batch1_->stride(0), lda,
THCTensor_(data)(state, batch2_) + i * batch2_->stride(0), ldb,
beta,
THCTensor_(data)(state, result_) + i * result_->stride(0), ldc);
}
}
#endif
#endif //CUDA_VERSION
#elif defined(THC_REAL_IS_BFLOAT16)
#if defined(__HIP_PLATFORM_HCC__)
THCudaBlas_BgemmStridedBatched(
state,
transpose_batch1,
transpose_batch2,
result_->size(transpose_result ? 2 : 1),
result_->size(transpose_result ? 1 : 2),
batch1_->size(transpose_result ? 1 : 2),
alpha,
THCTensor_(data)(state, batch1_), lda, batch1_->stride(0),
THCTensor_(data)(state, batch2_), ldb, batch2_->stride(0),
beta,
THCTensor_(data)(state, result_), ldc, result_->stride(0),
num_batches);
#endif // __HIP_PLATFORM_HCC__
#endif
if (batch1_ != batch1) {
THCTensor_(free)(state, batch1_);
}
if (batch2_ != batch2) {
THCTensor_(free)(state, batch2_);
}
if (result_ != result) {
THCTensor_(freeCopyTo)(state, result_, result);
}
#if defined(THC_REAL_IS_BFLOAT16) && !defined(__HIP_PLATFORM_HCC__)
// To avoid "variable was set but never used" warning
[&transpose_batch1, &transpose_batch2, &lda, &ldb, &ldc]{}();
TORCH_CHECK(false, "BgemmStridedBatched is not supported with at::BFloat16 type");
#endif
}
#if !defined(THC_REAL_IS_BFLOAT16) || defined(__HIP_PLATFORM_HCC__)
at::namedinference::propagate_names_if_nonempty(result, maybe_outnames);
#endif
#else
ERROR_ONLY_FP_TYPES("baddbmm");
#endif
}
#endif
|
088076cf80f166ad569c0c7bd83e8a8de5f4f984.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include <vector>
#include <utility>
#include <boost/lexical_cast.hpp>
#include "math.h"
#include "hipcub/hipcub.hpp"
#include "flexarray.h"
#include "b_plus_tree.cpp"
#include <time.h>
#include <list>
#define __CUDA__ 1
//#define __DEBUG__ 1
#define MAX_DEPTH 3
#define SSTR( x ) static_cast< std::ostringstream & >( \
( std::ostringstream() << std::dec << x ) ).str()
hipStream_t myStream;
hipStream_t debugStream;
using namespace std;
#define cudaCheckError() cudaChkError(__LINE__, __FILE__)
void inline cudaChkError(int line, const char* filename) {
hipError_t err = hipGetLastError();
if (err) std::cout << "Error on line " << line << " of " << filename << " : " << hipGetErrorString(err) << std::endl;
}
template <unsigned int i>
__global__ void debugMark() {
}
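// Empty kernels distinguished by their template id; launching debugMark<N>
// on a dedicated stream apparently serves to drop visible markers into
// profiler timelines.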
size_t description_outer(int *descript1, const size_t size1, const size_t d1,
int *descript2, const size_t size2, const size_t d2,
int *descript_out);
void two_list_freq_count(flexarray_bool *data1/* txi */,
flexarray_bool *data2/* txj */, int *count/* ixj */,
flexarray_bool *outdata/* tx(i*j) */);
int threshold_count(int *descriptions/* (i*j)xd */, const size_t i, const size_t j, const int d, int *count/* j*i */,
flexarray_bool *data/* tx(i*j) */, const size_t t, const int threshold);
int threshold_count_gpu(int *descriptions/* (i*j)xd */, const size_t i, const size_t j, const int d, int *count/* j*i */,
flexarray_bool *data/* tx(i*j) */, const size_t t, const int threshold);
vector<string> explode( const string &delimiter, const string &explodeme);
int main(int argc, char** argv) {
int n_items;
int n_trans;
int min_support = 1;
flexarray_bool *data, *newdata, *first_data, *h_data;
int **freqpattern;
int *this_descriptions = NULL;
int *next_descriptions = NULL;
int *first_descriptions = NULL;
int *freq=NULL, *new_freq;
std::string** names;
//Jorge's variables
//vector<string> arr;
int numero_de_colunas = 0;
int numero_de_items = 0;
int numero_de_linhas = 0;
int resultado_do_find_int = 0;
int possui = 0;
std::list<string> lista_items;
int i;
int j;
int k;
char delimitador;
stringstream ss;
string str;
string tmp;
string num_result_temp;
char num_result_temp_char[100];
string resultado_do_find;
vector<string> str_vector;
BPlusTree<string,string,3,3> b_plus_tree;
clock_t tStart = clock();
if (argc < 2) {
std::cout << "Usage: " << std::endl << " lb_apriori <input file name>" << std::endl;
return 0;
}
#if defined(__CUDA__)
std::cout << "Processing on GPU" << std::endl;
#else
std::cout << "Processing on CPU" << std::endl;
#endif
std::cout << "Reading input data..." << std::endl;
/*std::ifstream fin(argv[1], std::ifstream::in);
fin >>min_support>>n_items >> n_trans;
std::cout << n_trans << " transactions with " << n_items << " items" << std::endl;
freqpattern = (int**)malloc(sizeof(int*)*(MAX_DEPTH+1));
data = new flexarray_bool(n_trans, n_items, true);
h_data = new flexarray_bool(n_trans, n_items);
names = (std::string**)malloc(sizeof(std::string*)*n_items);
std::cout << "Found items named "<<std::endl;
for (int p=0;p<n_items;p++) {
std::string tmp;
fin>>tmp;
names[p] = new std::string(tmp);
std::cout<<*names[p]<<std::endl;
}*/
//reading the dataset
freqpattern = (int**)malloc(sizeof(int*)*(MAX_DEPTH+1));
std::cout << "Reading input data..." << std::endl;
std::ifstream fin(argv[1], std::ifstream::in);
fin >> numero_de_colunas >> delimitador;
cout << "O numero de colunas " << numero_de_colunas << std::endl;
cout << "O delimitador " << delimitador << std:: endl;
getline(fin,str);
while(getline(fin,str))
{
str_vector = explode(",",str);
for(i = 0 ; i < str_vector.size();i++)
{
/*ss << i;
num_result_temp = ss.str();
ss.str( std::string() );
ss.clear();
num_result_temp = num_result_temp + "_" + str_vector[i];*/
sprintf(num_result_temp_char,"%d",i);
num_result_temp = num_result_temp_char;
num_result_temp += "_";
num_result_temp += str_vector[i];
//if(! (b_plus_tree.find(str_vector[i]) ) )
/*if(! (b_plus_tree.find(num_result_temp) ) )
{
//cout<<num_result_temp;
//cout << str_vector[i];
//b_plus_tree.insert( str_vector[i] ,SSTR(numero_de_items));
b_plus_tree.insert( num_result_temp ,SSTR(numero_de_items));
numero_de_items++;
//cout << numero_de_items << std::endl;
}*/
possui = 0;
for (std::list<string>::iterator it = lista_items.begin(); it != lista_items.end(); it++)
{
if(*it == num_result_temp) {possui = 1 ;break ;}
}
if(possui == 0)
{
lista_items.push_back(num_result_temp);
numero_de_items++;
}
}
numero_de_linhas++;
}
n_trans = numero_de_linhas;
n_items = numero_de_items;
data = new flexarray_bool(n_trans, n_items, true);
h_data = new flexarray_bool(n_trans, n_items);
names = (std::string**)malloc(sizeof(std::string*)*n_items);
//cout << "agora construindo a matrix" << endl;
fin.clear();
fin.seekg(0, ios::beg);
fin >> numero_de_colunas >> delimitador;
//bool matrix[numero_de_linhas][numero_de_items];
int** matrix = new int*[numero_de_linhas];
for(int i = 0; i < numero_de_linhas; ++i) matrix[i] = new int[numero_de_items];
for (int q=0; q<n_trans;q++) {
for (int p=0; p<n_items; p++) {
matrix[q][p] = 0;
}
}
i = 0;
cout << "fazendo a matriz " << endl;
clock_t tstart_matriz = clock();
getline(fin,str);
while(getline(fin,str))
{
//cout << numero_de_items << " " << numero_de_linhas << " " << "TEST: " << i << " " << str << std::endl;
str_vector = explode(",",str);
//cout << str_vector[0];
for(k = 0;k < str_vector.size();k++)
{
//cout<<k<< " " << str_vector[k] <<std::endl;
//ss << k;
//cout << ss.str() << std::endl;
//num_result_temp = boost::lexical_cast<std::string>(k) + "_" + str_vector[k] ;
sprintf(num_result_temp_char,"%d",k);
//cout << num_result_temp<< std::endl;
//ss.str("");
//ss.clear();
num_result_temp = num_result_temp_char;
num_result_temp += "_";
num_result_temp += str_vector[k];
//cout << num_result_temp + "_" + str_vector[k] << std::endl;
//sprintf(num_result_temp_char,"");
//cout << "vou buscar "<< num_result_temp + "_" + str_vector[k] << std::endl;
//cout<<k<<std::endl;
//b_plus_tree.find(num_result_temp,&resultado_do_find);
for (std::list<string>::iterator it = lista_items.begin(); it != lista_items.end(); it++)
{
if(*it == num_result_temp) {resultado_do_find_int = std::distance(lista_items.begin(), it); break;}
}
//resultado_do_find_int = atoi(resultado_do_find.c_str());
//resultado_do_find_int= it - lista_items.begin();
//cout << "K = " << k << " " << str_vector[k] << " " << resultado_do_find_int << endl;
for(j = 0; j < numero_de_items;j++)
{
//cout << resultado_do_find_int << " " << k << " " << i << " " << j << std::endl;
if(j == resultado_do_find_int) {
names[resultado_do_find_int] = new string( num_result_temp);
matrix[i][j] = 1;
//cout << " " << " Ok " << std::endl;
}
else if(matrix[i][j] != 1) matrix[i][j] = 0;
//else if(matrix[i][j] == true) cout << "teste" << std::endl;
}
}
/*for(j=0;j<= numero_de_items;j++)
{
cout<<matrix[i][j]<< " ";
}
cout << std::endl;*/
i++;
}
printf("Time taken matriz: %.4fs\n", (double)(clock() - tstart_matriz)/CLOCKS_PER_SEC);
/*std::cout << "Found " << n_items << " items named "<<std::endl;
for (int i=0;i<n_items;i++) {
std::cout<< *names[i] << std::endl;
//cout<< i << " " << *names[i]<<std::endl;
}*/
/*for (int q=0; q<n_trans;q++) {
for (int p=0; p<n_items; p++) {
cout << matrix[q][p] << " ";
}
cout << std::endl;
}*/
// Set input data
cout << "alocando a matrix" << std::endl;
for (int q=0; q<n_trans;q++) {
for (int p=0; p<n_items; p++) {
//int tmp;
//fin>>tmp;
//cout << q << " " << p << std::endl;
if (matrix[q][p] == 1)
{
h_data->set(q,p,true);
}
else {
h_data->set(q,p,false);
}
//if (tmp>0) (*data)(q,p) = true;
//else (*data)(q,p) = false;
}
}
cout << "a matriz possui tamanho: " << n_trans << " x " << n_items ;
cout << "O tamanho da matriz no condensada " << sizeof(matrix);
cout << "O tamanho da matriz condensada " << sizeof(h_data);
return 0;
clock_t tstart_cuda_malloc = clock();
/*int *ppArray_a;
hipMalloc((void**)&ppArray_a, n_trans * sizeof(int*));
for(int i=0; i<n_trans; i++)
{
hipMalloc(&matrix[i], n_items*sizeof(int));
}
hipMemcpy(ppArray_a, matrix, n_trans*sizeof(int *), hipMemcpyHostToDevice);*/
hipMemcpy(data->data, h_data->data, sizeof(unsigned int)*h_data->real_c*h_data->r/32, hipMemcpyHostToDevice);
cudaCheckError();
hipDeviceSynchronize();
// hipFree(ppArray_a);
printf("Time taken matriz: %.4fs\n", (double)(clock() - tstart_cuda_malloc)/CLOCKS_PER_SEC);
cout << "matrix alocada" << std::endl;
clock_t tstart_cuda = clock();
hipMemcpy(data->data, h_data->data, sizeof(unsigned int)*h_data->real_c*h_data->r/32, hipMemcpyHostToDevice);
printf("Time taken cuda: %.4fs\n", (double)(clock() - tstart_cuda)/CLOCKS_PER_SEC);
int this_size=n_items;
first_descriptions = (int*)malloc(sizeof(int)*n_items);
this_descriptions = (int*)malloc(sizeof(int)); //allocate something for freeing
int last_size=1;
first_data = new flexarray_bool(n_trans, 1, true);
for (int p=0; p<n_items; p++) {
first_descriptions[p] = p;
}
for (int q=0; q<n_trans; q++) {
h_data->set(q,0,true);
}
tstart_cuda = clock();
hipMemcpy(first_data->data, h_data->data, sizeof(unsigned int)*first_data->real_c*first_data->r/32, hipMemcpyHostToDevice);
printf("Time taken cuda: %.4fs\n", (double)(clock() - tstart_cuda)/CLOCKS_PER_SEC);
//display_flexarray(first_data);
delete(h_data);
hipStreamCreate(&myStream);
hipStreamCreate(&debugStream);
for (int q=0; q<n_trans; q++) {
first_data->set(q,0,true);
}
for (int depth=1;depth<MAX_DEPTH;depth++) {
std::cout << std::endl << " **** DEPTH = " << depth << " **** " << std::endl;
cudaCheckError();
this_descriptions = next_descriptions;
next_descriptions = (int*)malloc(sizeof(int)*depth*last_size*n_items);
this_size = last_size * n_items;
//next_size = cull_descriptions(next_descriptions, next_size, depth);
#if defined(__CUDA__)
hipMalloc(&freqpattern[depth],sizeof(int)*this_size);
hipMemsetAsync(freqpattern[depth],0 ,sizeof(int)*this_size, myStream);
cudaCheckError();
#else
freqpattern[depth] = (int*)malloc(sizeof(int)*this_size);
#endif
newdata = new flexarray_bool(n_trans, this_size, true);
two_list_freq_count(data, first_data, freqpattern[depth], newdata);
cudaCheckError();
hipLaunchKernelGGL(( debugMark<1>), dim3(1),dim3(1),0,debugStream, );
this_size = description_outer(this_descriptions, last_size, depth-1, first_descriptions, n_items, 1,
next_descriptions);
hipLaunchKernelGGL(( debugMark<2>), dim3(1),dim3(1),0,debugStream, );
#if defined(__DEBUG__)
#if defined(__CUDA__)
new_freq = (int*) realloc(freq, sizeof(int)*this_size);
freq = new_freq;
tstart_cuda = clock();
hipMemcpy(freq, freqpattern[depth], sizeof(int)*this_size, hipMemcpyDeviceToHost);
printf("Time taken cuda: %.2fs\n", (double)(clock() - tstart_cuda)/CLOCKS_PER_SEC);
cudaCheckError();
#else
freq = freqpattern[depth];
#endif
for (int p=0; p<this_size; p++) {
std::cout << p << " : ";
for (int d=0; d<depth; d++) {
std::cout << next_descriptions[p*depth+d] <<", ";
}
std::cout << " ==> " << freq[p] << std::endl;
}
for (int q=0;q<n_trans;q++) {
for (int p=0;p<this_size;p++) {
std::cout << (*newdata)(q,p) << " ";
}
std::cout << std::endl;
}
std::cout << "Threshold. last size : " << last_size << " n_items: " << n_items << std::endl;
#endif
#if defined(__CUDA__)
this_size = threshold_count_gpu(next_descriptions, last_size, n_items, depth, freqpattern[depth], newdata, n_trans, min_support);
freqpattern[depth]+=last_size*n_items-this_size;
#else
this_size = threshold_count(next_descriptions, last_size, n_items, depth, freqpattern[depth], newdata, n_trans, min_support);
#endif
cudaCheckError();
#if defined(__DEBUG__)
#if defined(__CUDA__)
new_freq = (int*) realloc(freq, sizeof(int)*this_size);
freq = new_freq;
hipMemcpy(freq, freqpattern[depth], sizeof(int)*this_size, hipMemcpyDeviceToHost);
cudaCheckError();
#else
freq = freqpattern[depth];
#endif
for (int p=0; p<this_size; p++) {
std::cout << p << " : ";
for (int d=0; d<depth; d++) {
std::cout << next_descriptions[p*depth+d] <<", ";
}
std::cout << " ==> " << freq[p] << std::endl;
}
for (int q=0;q<n_trans;q++) {
for (int p=0;p<last_size*n_items;p++) {
std::cout << (*newdata)(q,p) << " ";
}
std::cout << std::endl;
}
#endif
#if defined(__CUDA__)
new_freq = (int*) realloc(freq, sizeof(int)*this_size);
freq = new_freq;
hipMemcpy(freq, freqpattern[depth], sizeof(int)*this_size, hipMemcpyDeviceToHost);
cudaCheckError();
#else
freq = freqpattern[depth];
#endif
for (int p=0;p<this_size;p++) {
//std::cout<<min_support<<std::endl;
if (freq[p]>min_support) {
std::cout<<"{";
for (int d=0; d<depth; d++) std::cout<< *names[next_descriptions[p*depth+d]] << ", ";
std::cout<< "},";
std::cout<<freq[p]<<std::endl;
}
}
#if defined(__CUDA__)
hipFree(freqpattern[depth]-last_size*n_items+this_size);
#else
free(freqpattern[depth]);
#endif
last_size = this_size;
if (depth==1) {
n_items = last_size;
first_descriptions = next_descriptions;
first_data = newdata;
} else {
if (data != first_data) delete(data);
}
data = newdata;
if (last_size < depth) break;
#if defined(__DEBUG__)
std::cout << "Data: " << std::endl;
display_flexarray(data);
std::cout << "First_data: " << std::endl;
display_flexarray(first_data);
#endif
}
printf("Time taken: %.2fs\n", (double)(clock() - tStart)/CLOCKS_PER_SEC);
free(this_descriptions);
free(next_descriptions);
#if defined(__CUDA__)
free(freq);
#endif
if (first_data != data) delete(first_data);
delete(data);
hipStreamDestroy(myStream);
hipStreamDestroy(debugStream);
}
size_t description_outer(int *descript1, const size_t size1, const size_t d1,
int *descript2, const size_t size2, const size_t d2,
int *descript_out) {
for (int q=0; q < size1; q++) {
for (int p=0; p < size2; p++) {
for (int dd=0; dd < d1; dd++) {
descript_out[(d1+d2)*size2*q + (d1+d2)*p + dd] = descript1[q*d1 + dd];
}
for (int dd=0; dd < d2; dd++) {
descript_out[(d1+d2)*size2*q + (d1+d2)*p + d1+ dd] = descript2[p*d2 + dd];
}
}
}
return size1 * size2;
}
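// two_list_freq_count_kernel: rows are bit-packed 32 items per word. Each
// thread handles one item ii of data1 and a 32-item slab [jj, jj+31] of
// data2 over a tile of up to 100 transactions; whenever item ii occurs in a
// transaction, data2's packed bits give the pairwise co-occurrences, which
// are accumulated with atomicAdd and OR-ed into the packed output column
// block for the pairs (ii, jj..jj+31).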
__global__ void two_list_freq_count_kernel(unsigned int *data1, size_t rows, size_t cols1, size_t size1,
unsigned int *data2, size_t cols2, size_t size2,
int *count, unsigned int *outdata, size_t outsize) {
size_t ii = threadIdx.x + blockDim.x * blockIdx.x;
size_t jj = (threadIdx.y + blockDim.y * blockIdx.y) * 32;
size_t tt0 = (threadIdx.z + blockDim.z * blockIdx.z) * 100;
int sum[32];
unsigned int chtmp, chout;
bool b2;
if (ii < cols1 && jj < cols2) {
for(size_t cnt=0;cnt<32;cnt++) {
sum[cnt]=0;
}
for (size_t tt=tt0; tt < tt0 + 100 && tt<rows ; tt++) {
chout = 0;
chtmp = __ldg(&data1[(tt*size1 + ii)/32]);
if ( (chtmp & (unsigned int)(ONEBIT>>(ii%32))) != 0) {
chtmp = __ldg(&data2[(tt*size2 + jj)/32]);
chout |= chtmp;
size_t cnt;
for (cnt=0; cnt<32; cnt++) {
b2 = (chtmp & (unsigned int)(ONEBIT>>cnt)) != 0;
if (b2) {
if (jj+cnt < cols2) {
sum[cnt]++;
} else {
//Don't write past the end of the data2 row
chout -= chout & (ONEBIT>>cnt);
}
}
}
atomicOr(&outdata[(tt*outsize + ii*cols2 + jj)/32],
chout>>((tt*outsize + ii*cols2 + jj)%32));
}
}
for(size_t cnt=0;cnt<32;cnt++) {
if (jj+cnt < cols2) {
atomicAdd(&count[cols2*ii + jj + cnt ], sum[cnt]);
}
}
}
}
void two_list_freq_count(flexarray_bool *data1/* txi */,
flexarray_bool *data2/* txj */, int *count/* ixj */,
flexarray_bool *outdata/* tx(i*j) */) {
#if defined(__CUDA__)
hipMemset(outdata->data, 0, sizeof(unsigned int)*outdata->real_c*outdata->r/32);
cudaCheckError();
hipLaunchKernelGGL(( two_list_freq_count_kernel), dim3(dim3((data1->c+31)/32,(data2->c+7)/8, (data1->r+99)/100)),
dim3(dim3(32,8,1)), 0, myStream,
data1->data, data1->r, data1->c, data1->real_c,
data2->data, data2->c, data2->real_c, count,
outdata->data, outdata->real_c);
cudaCheckError();
#else
for (int ii=0;ii < data1->c; ii++ ) {
for (int jj=0; jj < data2->c; jj++ ) {
int sum = 0;
for (int tt=0; tt < data1->r; tt++) {
if ((*data1)(tt,ii) && (*data2)(tt,jj)) sum++;
outdata->set(tt, ii*data2->c + jj, (*data1)(tt,ii) && (*data2)(tt,jj));
}
count[data2->c*ii + jj] = sum;
}
}
#endif
}
#if 1
// Set count to zero for any itemset whose indices are not in ascending order.
// Since the first d-1 elements of a set are guaranteed ordered (this step ran
// at the previous depth), each element only needs checking against the last.
// Also fills an int* with the identity permutation 0..length-1.
__global__ void zero_bad_sets(int* count, size_t length, int *range, const int* descriptions, const int d) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (d > 1 && idx < length*(d-1)) {
int newi = d-1 + d*(idx/(d-1));
int ii = idx%(d-1) + d*(idx/(d-1));
if (descriptions[newi] <= descriptions[ii]) {
count[idx/(d-1)] = 0;
}
}
if (idx < length) range[idx] = idx;
}
// Rearrange the flexarray_bool data according to "order"
__global__ void rearrange_data(unsigned int *outdata, const unsigned int *data, const size_t cols, const size_t real_c, const size_t rows, int* order ) {
int ii = (threadIdx.x + blockDim.x * blockIdx.x)*32;
int tt = threadIdx.y + blockDim.y * blockIdx.y;
unsigned int iout = 0;
unsigned int mask = (unsigned int)1<<31;
unsigned int iin;
int inidx;
if (tt < rows && ii<cols) {
for(int cnt = 0; ii+cnt < cols && cnt < 32; cnt++) {
inidx = order[ii+cnt];
iin = data[(tt*real_c + inidx)/32];
if ((iin & (ONEBIT>>(inidx%32))) != 0) {
iout |= mask;
}
mask >>=1;
}
outdata[(tt*real_c + ii)/32]= iout;
}
}
// In a set assumed to be ordered, find the first element greater than
// or equal to threshold
__global__ void find_size(int *result, int *count, size_t max, int threshold) {
int ii = threadIdx.x + blockDim.x * blockIdx.x;
if (ii==0) {
if (count[ii] >= threshold) *result = max;
} else if (ii < max) {
if(count[ii] >=threshold && count[ii-1] < threshold) {
*result = max-ii;
}
}
}
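// threshold_count_gpu: radix-sort (count, column-index) pairs ascending,
// locate with find_size how many itemsets meet the support threshold, then
// re-sort the survivors by original index and gather their packed columns
// (rearrange_data on the device) and descriptions (on the host) to the
// front, returning the new column count.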
int threshold_count_gpu(int *descriptions/* (i*j)xd */, const size_t i, const size_t j, const int d, int *count/* j*i */,
flexarray_bool *data/* tx(i*j) */, const size_t t, const int threshold) {
int *range;
int *count_buf, *range_buf;
hipMalloc(&range, sizeof(int)*i*j);
int *d_descriptions;
if (d >1) {
hipMalloc(&d_descriptions, sizeof(int)*d*i*j);
hipMemcpy(d_descriptions, descriptions, sizeof(int)*d*i*j, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( zero_bad_sets), dim3((i*j*(d-1)+127)/128), dim3(128), 0, 0, count, i*j, range, d_descriptions, d);
} else {
//just initialize range
hipLaunchKernelGGL(( zero_bad_sets), dim3((i*j+127)/128), dim3(128), 0, 0, count, i*j, range, d_descriptions, d);
}
hipMalloc(&count_buf, sizeof(int)*i*j);
hipMalloc(&range_buf, sizeof(int)*i*j);
hipcub::DoubleBuffer<int> d_count(count, count_buf);
hipcub::DoubleBuffer<int> d_range(range, range_buf);
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
hipcub::DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, d_count,
d_range, i*j);
hipMalloc(&d_temp_storage, temp_storage_bytes);
hipcub::DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, d_count,
d_range, i*j);
int new_size = i*j;
//count_buf is just a place to put the result
hipLaunchKernelGGL(( find_size), dim3((i*j+127)/128),dim3(128), 0, 0, count_buf, count, i*j, threshold);
hipMemcpy(&new_size, count_buf, sizeof(int), hipMemcpyDeviceToHost);
if (new_size == 0) {
data->c = 0;
return 0;
}
hipcub::DoubleBuffer<int> d_range_new(range+(i*j-new_size), range_buf);
hipcub::DoubleBuffer<int> d_count_new(count+(i*j-new_size), count_buf);
hipcub::DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes,
d_range_new, d_count_new, new_size);
hipFree(d_temp_storage);
hipFree(range_buf);
hipFree(count_buf);
cudaCheckError();
//copy range data to host
int *h_range = (int*) malloc(sizeof(int)*new_size);
hipMemcpy(h_range, range+(i*j-new_size), sizeof(int)*new_size, hipMemcpyDeviceToHost);
//duplicate data on the device
flexarray_bool *indata = new flexarray_bool(data->r, data->real_c, true);
hipMemcpyAsync(indata->data, data->data, sizeof(unsigned int) * data->r * data->real_c/32, hipMemcpyDeviceToDevice, myStream);
cudaCheckError();
//simultaneously process descriptions (on the host) and data (on the device)
hipLaunchKernelGGL(( rearrange_data), dim3(dim3((new_size+31)/32,(t+7)/8)), dim3(dim3(32,8)), 0, myStream, data->data, indata->data, new_size, data->real_c, t, range+(i*j-new_size));
cudaCheckError();
for (int p=0; p< new_size; p++) {
int inidx = h_range[p];
for (int dd=0; dd<d; dd++) {
descriptions[d*p+dd] = descriptions[d*inidx+dd];
}
}
free(h_range);
delete(indata);
hipFree(range);
if (d >1) {
hipFree(d_descriptions);
}
cudaCheckError();
data->c = new_size;
return new_size;
}
#endif
int threshold_count(int *descriptions/* (i*j)xd */, const size_t i, const size_t j, const int d, int *count/* j*i */,
flexarray_bool *data/* tx(i*j) */, const size_t t, const int threshold) {
int *h_count;
#if defined(__CUDA__)
cudaCheckError();
h_count = (int*)malloc(sizeof(int)*i*j);
hipMemcpy(h_count, count, sizeof(int)*i*j, hipMemcpyDeviceToHost);
cudaCheckError();
#else
h_count = count;
#endif
size_t inx=0; //serial write index; the in-place compaction below is inherently sequential
for (int jj=0; jj < i*j; jj++) {
if (h_count[jj] >= threshold) {
bool dup = false;
for (int dd=0; dd < d; dd++) {
if (dd < d-1 && descriptions[d*jj + dd] >= descriptions[d*jj+d-1]) dup = true;
descriptions[d*inx + dd] = descriptions[d*jj + dd];
}
if (dup) continue;
for (int tt=0; tt < data->r; tt++) {
data->set(tt,inx,(*data)(tt,jj));
cudaCheckError();
}
h_count[inx] = h_count[jj];
inx++;
}
}
data->c = inx;
#if defined(__CUDA__)
cudaCheckError();
hipMemcpy(count, h_count, sizeof(int)*i*j, hipMemcpyHostToDevice);
cudaCheckError();
free(h_count);
#endif
return inx;
}
// Jorge's part
vector<string> explode( const string &delimiter, const string &str)
{
vector<string> arr;
int strleng = str.length();
int delleng = delimiter.length();
if (delleng==0)
return arr;//no change
int i=0;
int k=0;
while( i<strleng )
{
int j=0;
while (i+j<strleng && j<delleng && str[i+j]==delimiter[j])
j++;
if (j==delleng)//found delimiter
{
arr.push_back( str.substr(k, i-k) );
i+=delleng;
k=i;
}
else
{
i++;
}
}
arr.push_back( str.substr(k, i-k) );
return arr;
}
|
088076cf80f166ad569c0c7bd83e8a8de5f4f984.cu
|
#include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include <vector>
#include <utility>
#include <boost/lexical_cast.hpp>
#include "math.h"
#include "cub/cub.cuh"
#include "flexarray.h"
#include "b_plus_tree.cpp"
#include <time.h>
#include <list>
#define __CUDA__ 1
//#define __DEBUG__ 1
#define MAX_DEPTH 3
#define SSTR( x ) static_cast< std::ostringstream & >( \
( std::ostringstream() << std::dec << x ) ).str()
cudaStream_t myStream;
cudaStream_t debugStream;
using namespace std;
#define cudaCheckError() cudaChkError(__LINE__, __FILE__)
void inline cudaChkError(int line, const char* filename) {
cudaError_t err = cudaGetLastError();
if (err) std::cout << "Error on line " << line << " of " << filename << " : " << cudaGetErrorString(err) << std::endl;
}
template <unsigned int i>
__global__ void debugMark() {
}
size_t description_outer(int *descript1, const size_t size1, const size_t d1,
int *descript2, const size_t size2, const size_t d2,
int *descript_out);
void two_list_freq_count(flexarray_bool *data1/* txi */,
flexarray_bool *data2/* txj */, int *count/* ixj */,
flexarray_bool *outdata/* tx(i*j) */);
int threshold_count(int *descriptions/* (i*j)xd */, const size_t i, const size_t j, const int d, int *count/* j*i */,
flexarray_bool *data/* tx(i*j) */, const size_t t, const int threshold);
int threshold_count_gpu(int *descriptions/* (i*j)xd */, const size_t i, const size_t j, const int d, int *count/* j*i */,
flexarray_bool *data/* tx(i*j) */, const size_t t, const int threshold);
vector<string> explode( const string &delimiter, const string &explodeme);
int main(int argc, char** argv) {
int n_items;
int n_trans;
int min_support = 1;
flexarray_bool *data, *newdata, *first_data, *h_data;
int **freqpattern;
int *this_descriptions = NULL;
int *next_descriptions = NULL;
int *first_descriptions = NULL;
int *freq=NULL, *new_freq;
std::string** names;
// Jorge's variables
//vector<string> arr;
int numero_de_colunas = 0;
int numero_de_items = 0;
int numero_de_linhas = 0;
int resultado_do_find_int = 0;
int possui = 0;
std::list<string> lista_items;
int i;
int j;
int k;
char delimitador;
stringstream ss;
string str;
string tmp;
string num_result_temp;
char num_result_temp_char[100];
string resultado_do_find;
vector<string> str_vector;
BPlusTree<string,string,3,3> b_plus_tree;
clock_t tStart = clock();
if (argc < 2) {
std::cout << "Usage: " << std::endl << " lb_apriori <input file name>" << std::endl;
return 0;
}
#if defined(__CUDA__)
std::cout << "Processing on GPU" << std::endl;
#else
std::cout << "Processing on CPU" << std::endl;
#endif
std::cout << "Reading input data..." << std::endl;
/*std::ifstream fin(argv[1], std::ifstream::in);
fin >>min_support>>n_items >> n_trans;
std::cout << n_trans << " transactions with " << n_items << " items" << std::endl;
freqpattern = (int**)malloc(sizeof(int*)*(MAX_DEPTH+1));
data = new flexarray_bool(n_trans, n_items, true);
h_data = new flexarray_bool(n_trans, n_items);
names = (std::string**)malloc(sizeof(std::string*)*n_items);
std::cout << "Found items named "<<std::endl;
for (int p=0;p<n_items;p++) {
std::string tmp;
fin>>tmp;
names[p] = new std::string(tmp);
std::cout<<*names[p]<<std::endl;
}*/
// reading the dataset
freqpattern = (int**)malloc(sizeof(int*)*(MAX_DEPTH+1));
std::cout << "Reading input data..." << std::endl;
std::ifstream fin(argv[1], std::ifstream::in);
fin >> numero_de_colunas >> delimitador;
cout << "O numero de colunas é " << numero_de_colunas << std::endl;
cout << "O delimitador é " << delimitador << std:: endl;
getline(fin,str);
while(getline(fin,str))
{
str_vector = explode(",",str);
for(i = 0 ; i < str_vector.size();i++)
{
/*ss << i;
num_result_temp = ss.str();
ss.str( std::string() );
ss.clear();
num_result_temp = num_result_temp + "_" + str_vector[i];*/
sprintf(num_result_temp_char,"%d",i);
num_result_temp = num_result_temp_char;
num_result_temp += "_";
num_result_temp += str_vector[i];
//if(! (b_plus_tree.find(str_vector[i]) ) )
/*if(! (b_plus_tree.find(num_result_temp) ) )
{
//cout<<num_result_temp;
//cout << str_vector[i];
//b_plus_tree.insert( str_vector[i] ,SSTR(numero_de_items));
b_plus_tree.insert( num_result_temp ,SSTR(numero_de_items));
numero_de_items++;
//cout << numero_de_items << std::endl;
}*/
possui = 0;
for (std::list<string>::iterator it = lista_items.begin(); it != lista_items.end(); it++)
{
if(*it == num_result_temp) {possui = 1 ;break ;}
}
if(possui == 0)
{
lista_items.push_back(num_result_temp);
numero_de_items++;
}
}
numero_de_linhas++;
}
n_trans = numero_de_linhas;
n_items = numero_de_items;
//data = new flexarray_bool(n_trans, n_items, true);
//h_data = new flexarray_bool(n_trans, n_items);
names = (std::string**)malloc(sizeof(std::string*)*n_items);
//cout << "agora construindo a matrix" << endl;
fin.clear();
fin.seekg(0, ios::beg);
fin >> numero_de_colunas >> delimitador;
//bool matrix[numero_de_linhas][numero_de_items];
int** matrix = new int*[numero_de_linhas];
for(int i = 0; i < numero_de_linhas; ++i) matrix[i] = new int[numero_de_items];
for (int q=0; q<n_trans;q++) {
for (int p=0; p<n_items; p++) {
matrix[q][p] = 0;
}
}
i = 0;
cout << "fazendo a matriz " << endl;
clock_t tstart_matriz = clock();
getline(fin,str);
while(getline(fin,str))
{
//cout << numero_de_items << " " << numero_de_linhas << " " << "TESTE :" << i << " " << str << std::endl;
str_vector = explode(",",str);
//cout << str_vector[0];
for(k = 0;k < str_vector.size();k++)
{
//cout<<k<< " " << str_vector[k] <<std::endl;
//ss << k;
//cout << ss.str() << std::endl;
//num_result_temp = boost::lexical_cast<std::string>(k) + "_" + str_vector[k] ;
sprintf(num_result_temp_char,"%d",k);
//cout << num_result_temp<< std::endl;
//ss.str("");
//ss.clear();
num_result_temp = num_result_temp_char;
num_result_temp += "_";
num_result_temp += str_vector[k];
//cout << num_result_temp + "_" + str_vector[k] << std::endl;
//sprintf(num_result_temp_char,"");
//cout << "vou buscar "<< num_result_temp + "_" + str_vector[k] << std::endl;
//cout<<k<<std::endl;
//b_plus_tree.find(num_result_temp,&resultado_do_find);
for (std::list<string>::iterator it = lista_items.begin(); it != lista_items.end(); it++)
{
if(*it == num_result_temp) {resultado_do_find_int = std::distance(lista_items.begin(), it); break;} // distance measured from begin(); the original had the arguments swapped, which is undefined for list iterators
}
//resultado_do_find_int = atoi(resultado_do_find.c_str());
//resultado_do_find_int= it - lista_items.begin();
//cout << "K = " << k << " " << str_vector[k] << " " << resultado_do_find_int << endl;
for(j = 0; j < numero_de_items;j++)
{
//cout << resultado_do_find_int << " " << k << " " << i << " " << j << std::endl;
if(j == resultado_do_find_int) {
names[resultado_do_find_int] = new string( num_result_temp);
matrix[i][j] = 1;
//cout << " " << " Ok " << std::endl;
}
else if(matrix[i][j] != 1) matrix[i][j] = 0;
//else if(matrix[i][j] == true) cout << "teste" << std::endl;
}
}
/*for(j=0;j<= numero_de_items;j++)
{
cout<<matrix[i][j]<< " ";
}
cout << std::endl;*/
i++;
}
printf("Time taken matriz: %.4fs\n", (double)(clock() - tstart_matriz)/CLOCKS_PER_SEC);
/*std::cout << "Found " << n_items << " items named "<<std::endl;
for (int i=0;i<n_items;i++) {
std::cout<< *names[i] << std::endl;
//cout<< i << " " << *names[i]<<std::endl;
}*/
/*for (int q=0; q<n_trans;q++) {
for (int p=0; p<n_items; p++) {
cout << matrix[q][p] << " ";
}
cout << std::endl;
}*/
// Set input data
cout << "alocando a matrix" << std::endl;
for (int q=0; q<n_trans;q++) {
for (int p=0; p<n_items; p++) {
//int tmp;
//fin>>tmp;
//cout << q << " " << p << std::endl;
if (matrix[q][p] == true || matrix[q][p] == 1)
{
h_data->set(q,p,true);
}
else {
h_data->set(q,p,false);
}
//if (tmp>0) (*data)(q,p) = true;
//else (*data)(q,p) = false;
}
}
cout << "a matriz possui tamanho: " << n_trans << " x " << n_items ;
cout << "O tamanho da matriz não condensada é " << sizeof(matrix);
cout << "O tamanho da matriz condensada é " << sizeof(h_data);
return 0;
clock_t tstart_cuda_malloc = clock();
/*int *ppArray_a;
cudaMalloc((void**)&ppArray_a, n_trans * sizeof(int*));
for(int i=0; i<n_trans; i++)
{
cudaMalloc(&matrix[i], n_items*sizeof(int));
}
cudaMemcpy(ppArray_a, matrix, n_trans*sizeof(int *), cudaMemcpyHostToDevice);*/
cudaMemcpy(data->data, h_data->data, sizeof(unsigned int)*h_data->real_c*h_data->r/32, cudaMemcpyHostToDevice);
cudaCheckError();
cudaDeviceSynchronize();
// cudaFree(ppArray_a);
printf("Time taken matriz: %.4fs\n", (double)(clock() - tstart_cuda_malloc)/CLOCKS_PER_SEC);
return 0;
cudaMemcpy(data->data, h_data->data, sizeof(unsigned int)*h_data->real_c*h_data->r/32, cudaMemcpyHostToDevice);
cudaMemcpy(data->data, h_data->data, sizeof(unsigned int)*h_data->real_c*h_data->r/32, cudaMemcpyHostToDevice);
cout << "matrix alocada" << std::endl;
clock_t tstart_cuda = clock();
cudaMemcpy(data->data, h_data->data, sizeof(unsigned int)*h_data->real_c*h_data->r/32, cudaMemcpyHostToDevice);
printf("Time taken cuda: %.4fs\n", (double)(clock() - tstart_cuda)/CLOCKS_PER_SEC);
int this_size=n_items;
first_descriptions = (int*)malloc(sizeof(int)*n_items);
this_descriptions = (int*)malloc(sizeof(int)); //allocate something for freeing
int last_size=1;
//first_data = new flexarray_bool(n_trans, 1, true);
first_data = new flexarray_bool(n_trans, 1, true);
for (int p=0; p<n_items; p++) {
first_descriptions[p] = p;
}
for (int q=0; q<n_trans; q++) {
h_data->set(q,0,true);
}
tstart_cuda = clock();
cudaMemcpy(first_data->data, h_data->data, sizeof(unsigned int)*first_data->real_c*first_data->r/32, cudaMemcpyHostToDevice);
printf("Time taken cuda: %.4fs\n", (double)(clock() - tstart_cuda)/CLOCKS_PER_SEC);
//display_flexarray(first_data);
delete(h_data);
cudaStreamCreate(&myStream);
cudaStreamCreate(&debugStream);
for (int q=0; q<n_trans; q++) {
first_data->set(q,0,true);
}
for (int depth=1;depth<MAX_DEPTH;depth++) {
std::cout << std::endl << " **** DEPTH = " << depth << " **** " << std::endl;
cudaCheckError();
this_descriptions = next_descriptions;
next_descriptions = (int*)malloc(sizeof(int)*depth*last_size*n_items);
this_size = last_size * n_items;
//next_size = cull_descriptions(next_descriptions, next_size, depth);
#if defined(__CUDA__)
cudaMalloc(&freqpattern[depth],sizeof(int)*this_size);
cudaMemsetAsync(freqpattern[depth],0 ,sizeof(int)*this_size, myStream);
cudaCheckError();
#else
freqpattern[depth] = (int*)malloc(sizeof(int)*this_size);
#endif
newdata = new flexarray_bool(n_trans, this_size, true);
two_list_freq_count(data, first_data, freqpattern[depth], newdata);
cudaCheckError();
debugMark<1><<<1,1,0,debugStream>>>();
this_size = description_outer(this_descriptions, last_size, depth-1, first_descriptions, n_items, 1,
next_descriptions);
debugMark<2><<<1,1,0,debugStream>>>();
#if defined(__DEBUG__)
#if defined(__CUDA__)
new_freq = (int*) realloc(freq, sizeof(int)*this_size);
freq = new_freq;
tstart_cuda = clock();
cudaMemcpy(freq, freqpattern[depth], sizeof(int)*this_size, cudaMemcpyDeviceToHost);
printf("Time taken cuda: %.2fs\n", (double)(clock() - tstart_cuda)/CLOCKS_PER_SEC);
cudaCheckError();
#else
freq = freqpattern[depth];
#endif
for (int p=0; p<this_size; p++) {
std::cout << p << " : ";
for (int d=0; d<depth; d++) {
std::cout << next_descriptions[p*depth+d] <<", ";
}
std::cout << " ==> " << freq[p] << std::endl;
}
for (int q=0;q<n_trans;q++) {
for (int p=0;p<this_size;p++) {
std::cout << (*newdata)(q,p) << " ";
}
std::cout << std::endl;
}
std::cout << "Threshold. last size : " << last_size << " n_items: " << n_items << std::endl;
#endif
#if defined(__CUDA__)
this_size = threshold_count_gpu(next_descriptions, last_size, n_items, depth, freqpattern[depth], newdata, n_trans, min_support);
freqpattern[depth]+=last_size*n_items-this_size;
#else
this_size = threshold_count(next_descriptions, last_size, n_items, depth, freqpattern[depth], newdata, n_trans, min_support);
#endif
cudaCheckError();
#if defined(__DEBUG__)
#if defined(__CUDA__)
new_freq = (int*) realloc(freq, sizeof(int)*this_size);
freq = new_freq;
cudaMemcpy(freq, freqpattern[depth], sizeof(int)*this_size, cudaMemcpyDeviceToHost);
cudaCheckError();
#else
freq = freqpattern[depth];
#endif
for (int p=0; p<this_size; p++) {
std::cout << p << " : ";
for (int d=0; d<depth; d++) {
std::cout << next_descriptions[p*depth+d] <<", ";
}
std::cout << " ==> " << freq[p] << std::endl;
}
for (int q=0;q<n_trans;q++) {
for (int p=0;p<last_size*n_items;p++) {
std::cout << (*newdata)(q,p) << " ";
}
std::cout << std::endl;
}
#endif
#if defined(__CUDA__)
new_freq = (int*) realloc(freq, sizeof(int)*this_size);
freq = new_freq;
cudaMemcpy(freq, freqpattern[depth], sizeof(int)*this_size, cudaMemcpyDeviceToHost);
cudaCheckError();
#else
freq = freqpattern[depth];
#endif
for (int p=0;p<this_size;p++) {
//std::cout<<min_support<<std::endl;
if (freq[p]>min_support) {
std::cout<<"{";
for (int d=0; d<depth; d++) std::cout<< *names[next_descriptions[p*depth+d]] << ", ";
std::cout<< "},";
std::cout<<freq[p]<<std::endl;
}
}
#if defined(__CUDA__)
cudaFree(freqpattern[depth]-last_size*n_items+this_size);
#else
free(freqpattern[depth]);
#endif
last_size = this_size;
if (depth==1) {
n_items = last_size;
first_descriptions = next_descriptions;
first_data = newdata;
} else {
if (data != first_data) delete(data);
}
data = newdata;
if (last_size < depth) break;
#if defined(__DEBUG__)
std::cout << "Data: " << std::endl;
display_flexarray(data);
std::cout << "First_data: " << std::endl;
display_flexarray(first_data);
#endif
}
printf("Time taken: %.2fs\n", (double)(clock() - tStart)/CLOCKS_PER_SEC);
free(this_descriptions);
free(next_descriptions);
#if defined(__CUDA__)
free(freq);
#endif
if (first_data != data) delete(first_data);
delete(data);
cudaStreamDestroy(myStream);
cudaStreamDestroy(debugStream);
}
size_t description_outer(int *descript1, const size_t size1, const size_t d1,
int *descript2, const size_t size2, const size_t d2,
int *descript_out) {
for (int q=0; q < size1; q++) {
for (int p=0; p < size2; p++) {
for (int dd=0; dd < d1; dd++) {
descript_out[(d1+d2)*size2*q + (d1+d2)*p + dd] = descript1[q*d1 + dd];
}
for (int dd=0; dd < d2; dd++) {
descript_out[(d1+d2)*size2*q + (d1+d2)*p + d1+ dd] = descript2[p*d2 + dd];
}
}
}
return size1 * size2;
}
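// Worked example (illustrative): with descript1 = {0, 1} (size1 = 2, d1 = 1)
// and descript2 = {0, 1, 2} (size2 = 3, d2 = 1), descript_out becomes the six
// pairs {0,0, 0,1, 0,2, 1,0, 1,1, 1,2} and the function returns 6.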
__global__ void two_list_freq_count_kernel(unsigned int *data1, size_t rows, size_t cols1, size_t size1,
unsigned int *data2, size_t cols2, size_t size2,
int *count, unsigned int *outdata, size_t outsize) {
size_t ii = threadIdx.x + blockDim.x * blockIdx.x;
size_t jj = (threadIdx.y + blockDim.y * blockIdx.y) * 32;
size_t tt0 = (threadIdx.z + blockDim.z * blockIdx.z) * 100;
int sum[32];
unsigned int chtmp, chout;
bool b2;
if (ii < cols1 && jj < cols2) {
for(size_t cnt=0;cnt<32;cnt++) {
sum[cnt]=0;
}
for (size_t tt=tt0; tt < tt0 + 100 && tt<rows ; tt++) {
chout = 0;
chtmp = __ldg(&data1[(tt*size1 + ii)/32]);
if ( (chtmp & (unsigned int)(ONEBIT>>(ii%32))) != 0) {
chtmp = __ldg(&data2[(tt*size2 + jj)/32]);
chout |= chtmp;
size_t cnt;
for (cnt=0; cnt<32; cnt++) {
b2 = (chtmp & (unsigned int)(ONEBIT>>cnt)) != 0;
if (b2) {
if (jj+cnt < cols2) {
sum[cnt]++;
} else {
//Don't write past the end of the data2 row
chout -= chout & (ONEBIT>>cnt);
}
}
}
atomicOr(&outdata[(tt*outsize + ii*cols2 + jj)/32],
chout>>((tt*outsize + ii*cols2 + jj)%32));
}
}
for(size_t cnt=0;cnt<32;cnt++) {
if (jj+cnt < cols2) {
atomicAdd(&count[cols2*ii + jj + cnt ], sum[cnt]);
}
}
}
}
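// Bit-packing layout used throughout (a note, not original code): each
// unsigned int packs 32 booleans MSB-first, so element (t, c) of a flexarray
// lives in word (t*real_c + c)/32 at bit (t*real_c + c)%32 counted from the
// MSB, and ONEBIT is assumed to be the mask 1u << 31. Each z-slice of the
// grid above covers a strip of up to 100 rows per thread.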
void two_list_freq_count(flexarray_bool *data1/* txi */,
flexarray_bool *data2/* txj */, int *count/* ixj */,
flexarray_bool *outdata/* tx(i*j) */) {
#if defined(__CUDA__)
cudaMemset(outdata->data, 0, sizeof(unsigned int)*outdata->real_c*outdata->r/32);
cudaCheckError();
two_list_freq_count_kernel<<<dim3((data1->c+31)/32,(data2->c+7)/8, (data1->r+99)/100),
dim3(32,8,1), 0, myStream>>>
(data1->data, data1->r, data1->c, data1->real_c,
data2->data, data2->c, data2->real_c, count,
outdata->data, outdata->real_c);
cudaCheckError();
#else
for (int ii=0;ii < data1->c; ii++ ) {
for (int jj=0; jj < data2->c; jj++ ) {
int sum = 0;
for (int tt=0; tt < data1->r; tt++) {
if ((*data1)(tt,ii) && (*data2)(tt,jj)) sum++;
outdata->set(tt, ii*data2->c + jj, (*data1)(tt,ii) && (*data2)(tt,jj));
}
count[data2->c*ii + jj] = sum;
}
}
#endif
}
#if 1
// set count to zero for any sets not in ascending order
// since the first d-1 elements of a set are guaranteed to be in order (because we did this step
// last time) we only need to check each element against the last
// also, fills an int* with an ordered set of integers from 0 to length
__global__ void zero_bad_sets(int* count, size_t length, int *range, const int* descriptions, const int d) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (d > 1 && idx < length*(d-1)) {
int newi = d-1 + d*(idx/(d-1));
int ii = idx%(d-1) + d*(idx/(d-1));
if (descriptions[newi] <= descriptions[ii]) {
count[idx/(d-1)] = 0;
}
}
if (idx < length) range[idx] = idx;
}
// Rearrange the flexarray_bool data according to "order"
__global__ void rearrange_data(unsigned int *outdata, const unsigned int *data, const size_t cols, const size_t real_c, const size_t rows, int* order ) {
int ii = (threadIdx.x + blockDim.x * blockIdx.x)*32;
int tt = threadIdx.y + blockDim.y * blockIdx.y;
unsigned int iout = 0;
unsigned int mask = (unsigned int)1<<31;
unsigned int iin;
int inidx;
if (tt < rows && ii<cols) {
for(int cnt = 0; ii+cnt < cols && cnt < 32; cnt++) {
inidx = order[ii+cnt];
iin = data[(tt*real_c + inidx)/32];
if ((iin & (ONEBIT>>(inidx%32))) != 0) {
iout |= mask;
}
mask >>=1;
}
outdata[(tt*real_c + ii)/32]= iout;
}
}
// In a set assumed to be ordered, find the first element greater than
// or equal to threshold
__global__ void find_size(int *result, int *count, size_t max, int threshold) {
int ii = threadIdx.x + blockDim.x * blockIdx.x;
if (ii==0) {
if (count[ii] >= threshold) *result = max;
} else if (ii < max) {
if(count[ii] >=threshold && count[ii-1] < threshold) {
*result = max-ii;
}
}
}
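// For reference, a host-side sketch of what find_size computes (an assumption
// of intent, not part of the original code): with `count` sorted ascending,
// the result is the number of trailing elements that meet the threshold.
static inline int find_size_host(const int *count, size_t max, int threshold) {
size_t first = 0;
while (first < max && count[first] < threshold) first++; // linear lower_bound
return (int)(max - first);
}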
int threshold_count_gpu(int *descriptions/* (i*j)xd */, const size_t i, const size_t j, const int d, int *count/* j*i */,
flexarray_bool *data/* tx(i*j) */, const size_t t, const int threshold) {
int *range;
int *count_buf, *range_buf;
cudaMalloc(&range, sizeof(int)*i*j);
int *d_descriptions;
if (d >1) {
cudaMalloc(&d_descriptions, sizeof(int)*d*i*j);
cudaMemcpy(d_descriptions, descriptions, sizeof(int)*d*i*j, cudaMemcpyHostToDevice);
zero_bad_sets<<<(i*j*(d-1)+127)/128, 128>>>(count, i*j, range, d_descriptions, d);
} else {
//just initialize range (d_descriptions is unused by the kernel when d == 1)
zero_bad_sets<<<(i*j+127)/128, 128>>>(count, i*j, range, d_descriptions, d);
}
cudaMalloc(&count_buf, sizeof(int)*i*j);
cudaMalloc(&range_buf, sizeof(int)*i*j);
cub::DoubleBuffer<int> d_count(count, count_buf);
cub::DoubleBuffer<int> d_range(range, range_buf);
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
cub::DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, d_count,
d_range, i*j);
cudaMalloc(&d_temp_storage, temp_storage_bytes);
cub::DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, d_count,
d_range, i*j);
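// CUB two-call idiom: the first SortPairs call above (d_temp_storage == NULL)
// only reports the required temp_storage_bytes; the second call performs the
// actual ascending sort of the counts, carrying `range` along as values.
// Note this code assumes the sorted output ends up in the original
// `count`/`range` buffers; strictly, cub::DoubleBuffer may leave the results
// in either buffer (d_count.Current()).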
int new_size = i*j;
//count_buf is just a place to put the result; zero it first so new_size
//stays 0 when no element reaches the threshold (find_size writes only on a hit)
cudaMemset(count_buf, 0, sizeof(int));
find_size<<<(i*j+127)/128,128>>>(count_buf, count, i*j, threshold);
cudaMemcpy(&new_size, count_buf, sizeof(int), cudaMemcpyDeviceToHost);
if (new_size == 0) {
//free the device buffers before the early return (the original leaked them)
cudaFree(d_temp_storage); cudaFree(range_buf); cudaFree(count_buf); cudaFree(range);
if (d > 1) cudaFree(d_descriptions);
data->c = 0;
return 0;
}
cub::DoubleBuffer<int> d_range_new(range+(i*j-new_size), range_buf);
cub::DoubleBuffer<int> d_count_new(count+(i*j-new_size), count_buf);
cub::DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes,
d_range_new, d_count_new, new_size);
cudaFree(d_temp_storage);
cudaFree(range_buf);
cudaFree(count_buf);
cudaCheckError();
//copy range data to host
int *h_range = (int*) malloc(sizeof(int)*new_size);
cudaMemcpy(h_range, range+(i*j-new_size), sizeof(int)*new_size, cudaMemcpyDeviceToHost);
//duplicate data on the device
flexarray_bool *indata = new flexarray_bool(data->r, data->real_c, true);
cudaMemcpyAsync(indata->data, data->data, sizeof(unsigned int) * data->r * data->real_c/32, cudaMemcpyDeviceToDevice, myStream);
cudaCheckError();
//simultaneously process descriptions (on the host) and data (on the device)
rearrange_data<<<dim3((new_size+31)/32,(t+7)/8), dim3(32,8), 0, myStream>>>(data->data, indata->data, new_size, data->real_c, t, range+(i*j-new_size));
cudaCheckError();
for (int p=0; p< new_size; p++) {
int inidx = h_range[p];
for (int dd=0; dd<d; dd++) {
descriptions[d*p+dd] = descriptions[d*inidx+dd];
}
}
free(h_range);
delete(indata);
cudaFree(range);
if (d >1) {
cudaFree(d_descriptions);
}
cudaCheckError();
data->c = new_size;
return new_size;
}
#endif
int threshold_count(int *descriptions/* (i*j)xd */, const size_t i, const size_t j, const int d, int *count/* j*i */,
flexarray_bool *data/* tx(i*j) */, const size_t t, const int threshold) {
int *h_count;
#if defined(__CUDA__)
cudaCheckError();
h_count = (int*)malloc(sizeof(int)*i*j);
cudaMemcpy(h_count, count, sizeof(int)*i*j, cudaMemcpyDeviceToHost);
cudaCheckError();
#else
h_count = count;
#endif
size_t inx=0; //serial write index; the in-place compaction below is inherently sequential
for (int jj=0; jj < i*j; jj++) {
if (h_count[jj] >= threshold) {
bool dup = false;
for (int dd=0; dd < d; dd++) {
if (dd < d-1 && descriptions[d*jj + dd] >= descriptions[d*jj+d-1]) dup = true;
descriptions[d*inx + dd] = descriptions[d*jj + dd];
}
if (dup) continue;
for (int tt=0; tt < data->r; tt++) {
data->set(tt,inx,(*data)(tt,jj));
cudaCheckError();
}
h_count[inx] = h_count[jj];
inx++;
}
}
data->c = inx;
#if defined(__CUDA__)
cudaCheckError();
cudaMemcpy(count, h_count, sizeof(int)*i*j, cudaMemcpyHostToDevice);
cudaCheckError();
free(h_count);
#endif
return inx;
}
// Jorge's part
vector<string> explode( const string &delimiter, const string &str)
{
vector<string> arr;
int strleng = str.length();
int delleng = delimiter.length();
if (delleng==0)
return arr;//no change
int i=0;
int k=0;
while( i<strleng )
{
int j=0;
while (i+j<strleng && j<delleng && str[i+j]==delimiter[j])
j++;
if (j==delleng)//found delimiter
{
arr.push_back( str.substr(k, i-k) );
i+=delleng;
k=i;
}
else
{
i++;
}
}
arr.push_back( str.substr(k, i-k) );
return arr;
}
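// Usage example (illustrative): explode(",", "a,b,,c") returns the vector
// {"a", "b", "", "c"}; an empty delimiter returns an empty vector.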
|
b1e9afcdbf360d845ee0c26d56445102fb0e0170.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @file SobolQRNG.cu
* @details This file describes the functions belonging to SobolQRNG class.
* @author Antonio Jose Lazaro Munoz.
* @date 20/02/2016
*/
#include "SobolQRNG.h"
#include "SobolQRNG_kernel.cu"
SobolQRNG::SobolQRNG(int n_v, int n_d, int gpu)
{
n_vectors = n_v;
n_dimensions = n_d;
hipDeviceProp_t props;
hipGetDeviceProperties(&props, gpu);
// This implementation of the generator outputs all the draws for
// one dimension in a contiguous region of memory, followed by the
// next dimension and so on.
// Therefore all threads within a block will be processing different
// vectors from the same dimension. As a result we want the total
// number of blocks to be a multiple of the number of dimensions.
n_blocks = n_dimensions;
// If the number of dimensions is small (e.g. fewer than four per
// multiprocessor) we partition the vectors across 4 * multiProcessorCount
// blocks (as well as threads); otherwise a single block is used.
if (n_dimensions < (4 * props.multiProcessorCount))
{
n_blocks = 4 * props.multiProcessorCount;
}
else
{
n_blocks = 1;
}
// Cap the dimGrid.x if the number of vectors is small
if (n_blocks > (unsigned int)(n_vectors / threadsperblock))
{
n_blocks = (n_vectors + threadsperblock - 1) / threadsperblock;
}
// Round up to a power of two, required for the algorithm so that
// stride is a power of two.
unsigned int targetDimGridX = n_blocks;
for (n_blocks = 1 ; n_blocks < targetDimGridX ; n_blocks *= 2);
}
SobolQRNG::~SobolQRNG()
{
if(h_directions != NULL) hipHostFree(h_directions);
if(h_outputGPU != NULL) hipHostFree(h_outputGPU);
if(h_outputCPU != NULL) delete [] h_outputCPU;
if(d_directions != NULL) hipFree(d_directions);
if(d_output != NULL) hipFree(d_output);
}
void SobolQRNG::allocHostMemory(void)
{
hipHostMalloc((void **)&h_directions, n_dimensions * n_directions_SOBOLQRNG * sizeof(unsigned int));
hipHostMalloc((void **)&h_outputGPU, n_vectors * n_dimensions * sizeof(float));
h_outputCPU = new float [n_vectors * n_dimensions];
}
void SobolQRNG::freeHostMemory(void)
{
if(h_directions != NULL) hipHostFree(h_directions);
if(h_outputGPU != NULL) hipHostFree(h_outputGPU);
if(h_outputCPU != NULL) delete [] h_outputCPU;
}
void SobolQRNG::allocDeviceMemory(void)
{
hipMalloc((void **)&d_directions, n_dimensions * n_directions_SOBOLQRNG * sizeof(unsigned int));
hipMalloc((void **)&d_output, n_vectors * n_dimensions * sizeof(float));
}
void SobolQRNG::freeDeviceMemory(void)
{
if(d_directions != NULL) hipFree(d_directions);
if(d_output != NULL) hipFree(d_output);
}
void SobolQRNG::generatingData(void)
{
unsigned int *v = h_directions;
for (int dim = 0 ; dim < n_dimensions ; dim++)
{
// First dimension is a special case
if (dim == 0)
{
for (int i = 0 ; i < n_directions_SOBOLQRNG ; i++)
{
// All m's are 1
v[i] = 1 << (31 - i);
}
}
else
{
int d = sobol_primitives[dim].degree;
// The first direction numbers (up to the degree of the polynomial)
// are simply v[i] = m[i] / 2^i (stored in Q0.32 format)
for (int i = 0 ; i < d ; i++)
{
v[i] = sobol_primitives[dim].m[i] << (31 - i);
}
// The remaining direction numbers are computed as described in
// the Bratley and Fox paper.
// v[i] = a[1]v[i-1] ^ a[2]v[i-2] ^ ... ^ a[v-1]v[i-d+1] ^ v[i-d] ^ v[i-d]/2^d
for (int i = d ; i < n_directions_SOBOLQRNG ; i++)
{
// First do the v[i-d] ^ v[i-d]/2^d part
v[i] = v[i - d] ^ (v[i - d] >> d);
// Now do the a[1]v[i-1] ^ a[2]v[i-2] ^ ... part
// Note that the coefficients a[] are zero or one and for compactness in
// the input tables they are stored as bits of a single integer. To extract
// the relevant bit we use right shift and mask with 1.
// For example, for a 10 degree polynomial there are ten useful bits in a,
// so to get a[2] we need to right shift 7 times (to get the 8th bit into
// the LSB) and then mask with 1.
for (int j = 1 ; j < d ; j++)
{
v[i] ^= (((sobol_primitives[dim].a >> (d - 1 - j)) & 1) * v[i - j]);
}
}
}
v += n_directions_SOBOLQRNG;
}
}
void SobolQRNG::memHostToDeviceAsync(hipStream_t stream)
{
hipMemcpyAsync(d_directions, h_directions,
n_dimensions * n_directions_SOBOLQRNG * sizeof(unsigned int), hipMemcpyHostToDevice, stream);
}
void SobolQRNG::memHostToDevice(void)
{
hipMemcpy(d_directions, h_directions,
n_dimensions * n_directions_SOBOLQRNG * sizeof(unsigned int), hipMemcpyHostToDevice);
}
void SobolQRNG::memDeviceToHostAsync(hipStream_t stream)
{
hipMemcpyAsync(h_outputGPU, d_output,
n_vectors * n_dimensions * sizeof(float), hipMemcpyDeviceToHost, stream);
}
void SobolQRNG::memDeviceToHost(void)
{
hipMemcpy(h_outputGPU, d_output,
n_vectors * n_dimensions * sizeof(float), hipMemcpyDeviceToHost);
}
void SobolQRNG::launch_kernel_Async(hipStream_t stream)
{
// Set up the execution configuration
dim3 dimGrid;
dim3 dimBlock;
dimBlock.x = threadsperblock;
dimGrid.x = n_blocks;
hipLaunchKernelGGL(( sobolGPU_kernel), dim3(dimGrid), dim3(dimBlock), 0, stream, n_vectors, n_dimensions, d_directions, d_output);
}
void SobolQRNG::launch_kernel(void)
{
// Set up the execution configuration
dim3 dimGrid;
dim3 dimBlock;
dimBlock.x = threadsperblock;
dimGrid.x = n_blocks;
hipLaunchKernelGGL(( sobolGPU_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, n_vectors, n_dimensions, d_directions, d_output);
}
void SobolQRNG::checkResults(void)
{
sobolCPU(n_vectors, n_dimensions, h_directions, h_outputCPU);
if (n_vectors == 1)
{
for (int d = 0, v = 0 ; d < n_dimensions ; d++)
{
float ref = h_outputCPU[d * n_vectors + v];
l1norm_diff += fabs(h_outputGPU[d * n_vectors + v] - ref);
l1norm_ref += fabs(ref);
}
// Output the L1-Error
l1error = l1norm_diff;
}
else
{
for (int d = 0 ; d < n_dimensions ; d++)
{
for (int v = 0 ; v < n_vectors ; v++)
{
float ref = h_outputCPU[d * n_vectors + v];
l1norm_diff += fabs(h_outputGPU[d * n_vectors + v] - ref);
l1norm_ref += fabs(ref);
}
}
// Output the L1-Error
l1error = l1norm_diff / l1norm_ref;
}
}
void SobolQRNG::getBytesHTD(int *bytes_htd)
{
*bytes_htd = n_dimensions * n_directions_SOBOLQRNG * sizeof(unsigned int);
}
void SobolQRNG::getBytesDTH(int *bytes_dth)
{
*bytes_dth = n_vectors * n_dimensions * sizeof(float);
}
void SobolQRNG::getTimeEstimations_HTD_DTH(int gpu, float *estimated_time_HTD, float *estimated_time_DTH,
float *estimated_overlapped_time_HTD, float *estimated_overlapped_time_DTH,
float LoHTD, float LoDTH, float GHTD, float GDTH, float overlappedGHTD, float overlappedGDTH)
{
hipDeviceProp_t props;
hipGetDeviceProperties(&props, gpu);
int bytes_HTD;
int bytes_DTH;
getBytesHTD(&bytes_HTD);
getBytesDTH(&bytes_DTH);
*estimated_time_HTD = LoHTD + (bytes_HTD) * GHTD;
*estimated_overlapped_time_HTD = 0.0;
if(props.asyncEngineCount == 2)
*estimated_overlapped_time_HTD = LoHTD + (bytes_HTD) * overlappedGHTD;
*estimated_time_DTH = LoDTH + (bytes_DTH) * GDTH;
*estimated_overlapped_time_DTH= 0.0;
if(props.asyncEngineCount == 2)
*estimated_overlapped_time_DTH= LoDTH + (bytes_DTH) * overlappedGDTH;
}
void SobolQRNG::sobolCPU(int n_vectors, int n_dimensions, unsigned int *directions, float *output)
{
unsigned int *v = directions;
for (int d = 0 ; d < n_dimensions ; d++)
{
unsigned int X = 0;
// x[0] is zero (in all dimensions)
output[n_vectors * d] = 0.0;
for (int i = 1 ; i < n_vectors ; i++)
{
// x[i] = x[i-1] ^ v[c]
// where c is the index of the rightmost zero bit in i
// minus 1 (since C arrays count from zero)
// In the Bratley and Fox paper this is equation (**)
X ^= v[ffs(~(i - 1)) - 1];
output[i + n_vectors * d] = (float)X * k_2powneg32_SOBOLQRNG;
}
v += n_directions_SOBOLQRNG;
}
}
|
b1e9afcdbf360d845ee0c26d56445102fb0e0170.cu
|
/**
* @file SobolQRNG.cu
* @details This file describes the functions belonging to SobolQRNG class.
* @author Antonio Jose Lazaro Munoz.
* @date 20/02/2016
*/
#include "SobolQRNG.h"
#include "SobolQRNG_kernel.cu"
SobolQRNG::SobolQRNG(int n_v, int n_d, int gpu)
{
n_vectors = n_v;
n_dimensions = n_d;
cudaDeviceProp props;
cudaGetDeviceProperties(&props, gpu);
// This implementation of the generator outputs all the draws for
// one dimension in a contiguous region of memory, followed by the
// next dimension and so on.
// Therefore all threads within a block will be processing different
// vectors from the same dimension. As a result we want the total
// number of blocks to be a multiple of the number of dimensions.
n_blocks = n_dimensions;
// If the number of dimensions is small (e.g. fewer than four per
// multiprocessor) we partition the vectors across 4 * multiProcessorCount
// blocks (as well as threads); otherwise a single block is used.
if (n_dimensions < (4 * props.multiProcessorCount))
{
n_blocks = 4 * props.multiProcessorCount;
}
else
{
n_blocks = 1;
}
// Cap the dimGrid.x if the number of vectors is small
if (n_blocks > (unsigned int)(n_vectors / threadsperblock))
{
n_blocks = (n_vectors + threadsperblock - 1) / threadsperblock;
}
// Round up to a power of two, required for the algorithm so that
// stride is a power of two.
unsigned int targetDimGridX = n_blocks;
for (n_blocks = 1 ; n_blocks < targetDimGridX ; n_blocks *= 2);
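// e.g. targetDimGridX = 5 rounds n_blocks up to 8; a power-of-two block
// count keeps the kernel's stride (n_blocks * threadsperblock, an assumption
// about the kernel in SobolQRNG_kernel.cu) a power of two as required.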
}
SobolQRNG::~SobolQRNG()
{
if(h_directions != NULL) cudaFreeHost(h_directions);
if(h_outputGPU != NULL) cudaFreeHost(h_outputGPU);
if(h_outputCPU != NULL) delete [] h_outputCPU;
if(d_directions != NULL) cudaFree(d_directions);
if(d_output != NULL) cudaFree(d_output);
}
void SobolQRNG::allocHostMemory(void)
{
cudaMallocHost((void **)&h_directions, n_dimensions * n_directions_SOBOLQRNG * sizeof(unsigned int));
cudaMallocHost((void **)&h_outputGPU, n_vectors * n_dimensions * sizeof(float));
h_outputCPU = new float [n_vectors * n_dimensions];
}
void SobolQRNG::freeHostMemory(void)
{
if(h_directions != NULL) cudaFreeHost(h_directions);
if(h_outputGPU != NULL) cudaFreeHost(h_outputGPU);
if(h_outputCPU != NULL) delete [] h_outputCPU;
}
void SobolQRNG::allocDeviceMemory(void)
{
cudaMalloc((void **)&d_directions, n_dimensions * n_directions_SOBOLQRNG * sizeof(unsigned int));
cudaMalloc((void **)&d_output, n_vectors * n_dimensions * sizeof(float));
}
void SobolQRNG::freeDeviceMemory(void)
{
if(d_directions != NULL) cudaFree(d_directions);
if(d_output != NULL) cudaFree(d_output);
}
void SobolQRNG::generatingData(void)
{
unsigned int *v = h_directions;
for (int dim = 0 ; dim < n_dimensions ; dim++)
{
// First dimension is a special case
if (dim == 0)
{
for (int i = 0 ; i < n_directions_SOBOLQRNG ; i++)
{
// All m's are 1
v[i] = 1 << (31 - i);
}
}
else
{
int d = sobol_primitives[dim].degree;
// The first direction numbers (up to the degree of the polynomial)
// are simply v[i] = m[i] / 2^i (stored in Q0.32 format)
for (int i = 0 ; i < d ; i++)
{
v[i] = sobol_primitives[dim].m[i] << (31 - i);
}
// The remaining direction numbers are computed as described in
// the Bratley and Fox paper.
// v[i] = a[1]v[i-1] ^ a[2]v[i-2] ^ ... ^ a[v-1]v[i-d+1] ^ v[i-d] ^ v[i-d]/2^d
for (int i = d ; i < n_directions_SOBOLQRNG ; i++)
{
// First do the v[i-d] ^ v[i-d]/2^d part
v[i] = v[i - d] ^ (v[i - d] >> d);
// Now do the a[1]v[i-1] ^ a[2]v[i-2] ^ ... part
// Note that the coefficients a[] are zero or one and for compactness in
// the input tables they are stored as bits of a single integer. To extract
// the relevant bit we use right shift and mask with 1.
// For example, for a 10 degree polynomial there are ten useful bits in a,
// so to get a[2] we need to right shift 7 times (to get the 8th bit into
// the LSB) and then mask with 1.
for (int j = 1 ; j < d ; j++)
{
v[i] ^= (((sobol_primitives[dim].a >> (d - 1 - j)) & 1) * v[i - j]);
}
}
}
v += n_directions_SOBOLQRNG;
}
}
void SobolQRNG::memHostToDeviceAsync(cudaStream_t stream)
{
cudaMemcpyAsync(d_directions, h_directions,
n_dimensions * n_directions_SOBOLQRNG * sizeof(unsigned int), cudaMemcpyHostToDevice, stream);
}
void SobolQRNG::memHostToDevice(void)
{
cudaMemcpy(d_directions, h_directions,
n_dimensions * n_directions_SOBOLQRNG * sizeof(unsigned int), cudaMemcpyHostToDevice);
}
void SobolQRNG::memDeviceToHostAsync(cudaStream_t stream)
{
cudaMemcpyAsync(h_outputGPU, d_output,
n_vectors * n_dimensions * sizeof(float), cudaMemcpyDeviceToHost, stream);
}
void SobolQRNG::memDeviceToHost(void)
{
cudaMemcpy(h_outputGPU, d_output,
n_vectors * n_dimensions * sizeof(float), cudaMemcpyDeviceToHost);
}
void SobolQRNG::launch_kernel_Async(cudaStream_t stream)
{
// Set up the execution configuration
dim3 dimGrid;
dim3 dimBlock;
dimBlock.x = threadsperblock;
dimGrid.x = n_blocks;
sobolGPU_kernel<<<dimGrid, dimBlock, 0, stream>>>(n_vectors, n_dimensions, d_directions, d_output);
}
void SobolQRNG::launch_kernel(void)
{
// Set up the execution configuration
dim3 dimGrid;
dim3 dimBlock;
dimBlock.x = threadsperblock;
dimGrid.x = n_blocks;
sobolGPU_kernel<<<dimGrid, dimBlock>>>(n_vectors, n_dimensions, d_directions, d_output);
}
void SobolQRNG::checkResults(void)
{
sobolCPU(n_vectors, n_dimensions, h_directions, h_outputCPU);
if (n_vectors == 1)
{
for (int d = 0, v = 0 ; d < n_dimensions ; d++)
{
float ref = h_outputCPU[d * n_vectors + v];
l1norm_diff += fabs(h_outputGPU[d * n_vectors + v] - ref);
l1norm_ref += fabs(ref);
}
// Output the L1-Error
l1error = l1norm_diff;
}
else
{
for (int d = 0 ; d < n_dimensions ; d++)
{
for (int v = 0 ; v < n_vectors ; v++)
{
float ref = h_outputCPU[d * n_vectors + v];
l1norm_diff += fabs(h_outputGPU[d * n_vectors + v] - ref);
l1norm_ref += fabs(ref);
}
}
// Output the L1-Error
l1error = l1norm_diff / l1norm_ref;
}
}
void SobolQRNG::getBytesHTD(int *bytes_htd)
{
*bytes_htd = n_dimensions * n_directions_SOBOLQRNG * sizeof(unsigned int);
}
void SobolQRNG::getBytesDTH(int *bytes_dth)
{
*bytes_dth = n_vectors * n_dimensions * sizeof(float);
}
void SobolQRNG::getTimeEstimations_HTD_DTH(int gpu, float *estimated_time_HTD, float *estimated_time_DTH,
float *estimated_overlapped_time_HTD, float *estimated_overlapped_time_DTH,
float LoHTD, float LoDTH, float GHTD, float GDTH, float overlappedGHTD, float overlappedGDTH)
{
cudaDeviceProp props;
cudaGetDeviceProperties(&props, gpu);
int bytes_HTD;
int bytes_DTH;
getBytesHTD(&bytes_HTD);
getBytesDTH(&bytes_DTH);
*estimated_time_HTD = LoHTD + (bytes_HTD) * GHTD;
*estimated_overlapped_time_HTD = 0.0;
if(props.asyncEngineCount == 2)
*estimated_overlapped_time_HTD = LoHTD + (bytes_HTD) * overlappedGHTD;
*estimated_time_DTH = LoDTH + (bytes_DTH) * GDTH;
*estimated_overlapped_time_DTH= 0.0;
if(props.asyncEngineCount == 2)
*estimated_overlapped_time_DTH= LoDTH + (bytes_DTH) * overlappedGDTH;
}
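// Example of the linear transfer model above (illustrative numbers, not
// measured): with LoHTD = 0.01 ms and GHTD = 3.2e-7 ms/byte, a 1,000,000-byte
// host-to-device copy is estimated at 0.01 + 1e6 * 3.2e-7 = 0.33 ms. The
// overlapped variants apply only when the device has two copy engines
// (asyncEngineCount == 2).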
void SobolQRNG::sobolCPU(int n_vectors, int n_dimensions, unsigned int *directions, float *output)
{
unsigned int *v = directions;
for (int d = 0 ; d < n_dimensions ; d++)
{
unsigned int X = 0;
// x[0] is zero (in all dimensions)
output[n_vectors * d] = 0.0;
for (int i = 1 ; i < n_vectors ; i++)
{
// x[i] = x[i-1] ^ v[c]
// where c is the index of the rightmost zero bit in i
// minus 1 (since C arrays count from zero)
// In the Bratley and Fox paper this is equation (**)
X ^= v[ffs(~(i - 1)) - 1];
output[i + n_vectors * d] = (float)X * k_2powneg32_SOBOLQRNG;
}
v += n_directions_SOBOLQRNG;
}
}
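// The ffs(~(i - 1)) - 1 above picks the index of the lowest zero bit of i-1,
// which is the classic Gray-code ordering for Sobol points. Worked example:
// i = 4 gives i-1 = 3 (binary ...011), so ~(i-1) = ...100, ffs(...) = 3, and
// X is XORed with v[2].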
|
c520629d90f59e6dcd60761ccd10724af8232b3c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <vector>
#include <set>
#include <map>
#include <algorithm>
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include <climits> // for INT_MIN used below
using namespace std;
#define CUDA_SAFE_CALL( err ) (safe_call(err, __LINE__))
#define MAX_THREADS_PER_BLOCK 1024
#define GLOBAL_MAX_EDGES_PER_SHARD 33554432
void safe_call(hipError_t ret, int line)
{
if(ret!=hipSuccess)
{
printf("Error at line %d : %s\n",line,hipGetErrorString(ret));
exit(-1);
}
}
typedef struct __interval
{
int start;
int end;
} interval_t;
typedef struct __edge
{
int src;
int dest;
int val;
} edge_t;
typedef struct __vertex
{
int val;
} vertex_t;
typedef struct __shard
{
int E;
int Vstart;
int Vend;
int * vmap;
vertex_t * from;
vertex_t * to;
} shard_t;
__device__ bool d_over;
__global__ void reset()
{
d_over = false;
}
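// d_over is the BFS convergence flag: kernels set it when any vertex is newly
// visited in an iteration, and the host reads/resets it each round via
// hipMemcpyToSymbol / hipMemcpyFromSymbol (see the main loop below).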
__global__ void init(vertex_t * vertices, int starting_vertex, int num_vertices)
{
int v = blockDim.x*blockIdx.x + threadIdx.x;
if (v==starting_vertex)
vertices[v].val = 0;
else if(v < num_vertices)
vertices[v].val = -1;
}
/*__global__ void gather_bfs(shard_t * shard, vertex_t * vertices, int current_depth)
{
int id = blockDim.x*blockIdx.x + threadIdx.x;
if(id < shard->E)
{
if(shard->edges[id].val == (current_depth+1))
{
int t=shard->edges[id].dest;
if(vertices[t].val == -1)
{
vertices[t].val = current_depth+1;
d_over = true;
}
}
}
}*/
__global__ void scatter_bfs_edge(const shard_t * shard, vertex_t * vertices, int current_depth)
{
int id = blockDim.x*blockIdx.x + threadIdx.x;
if(id < shard->E)
{
int s=shard->from[id].val;
int t=vertices[s].val;
if(t==current_depth)
{
int u=shard->to[id].val;
if(vertices[u].val == -1)
{
vertices[u].val = t+1;
d_over = true;
}
}
}
}
__global__ void scatter_bfs_vertex(const shard_t * shard, vertex_t * vertices, int current_depth)
{
int id = blockDim.x*blockIdx.x + threadIdx.x;
int vid = id + shard->Vstart;
if(vid <= shard->Vend)
{
if(vertices[vid].val == current_depth)
{
int i;
if(id == 0)
i = 0;
else
i = shard->vmap[id-1];
for(; i < shard->vmap[id]; i++)
{
if(vertices[shard->to[i].val].val == -1)
{
vertices[shard->to[i].val].val = current_depth+1;
d_over = true;
}
}
}
}
}
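// vmap is a CSR-style inclusive prefix sum over per-vertex out-degrees within
// the shard. Worked example (illustrative): vmap = {2, 5, 5, 7} means local
// vertex 0 owns edges [0,2), vertex 1 owns [2,5), vertex 2 owns none, and
// vertex 3 owns [5,7) of the shard's to[] array.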
bool cost(const edge_t &a, const edge_t &b)
{
return ((a.src < b.src) || (a.src == b.src && a.dest < b.dest));
}
int main(int argc, char * argv[])
{
struct timeval t1,t2;
static char * filename;
if(argc!=2)
{
printf("./a.out <filename>\n");
exit(-1);
}
else
{
filename = argv[1];
}
FILE * fp = fopen(filename,"r");
if(!fp)
{
printf("Error reading file.\n");
exit(-1);
}
/* Set cuda device to K40 */
CUDA_SAFE_CALL(hipSetDevice(0));
printf("Begin file reading...\n");
/* Get graph from file into CPU memory */
int num_vertices, num_edges, i, j, k;
fscanf(fp,"%d %d",&num_vertices,&num_edges);
//We are always going to have at least 2 shards to enable double buffering
int ns = num_edges / GLOBAL_MAX_EDGES_PER_SHARD;
int MAX_EDGES_PER_SHARD = (ns == 0) ? (num_edges + 1)/2 : (num_edges + 1)/(ns + 1); //We do this to balance the number of edges across the shards
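// Worked example (illustrative): num_edges = 100 with the 33554432-edge
// global cap gives ns = 0, so MAX_EDGES_PER_SHARD = (100+1)/2 = 50 and the
// graph is split into two shards for double buffering.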
//Array of vectors. vector i contains the out edges of vertex i
vector< vector<edge_t> > outEdges(num_vertices);
int * prefixV = (int *) calloc(num_vertices,sizeof(int));
int s,d,v;
// In Graphchi case, I am storing the source depth in each edge
// In X-stream case, I am storing the destination depth in each edge
for(i=0; i<num_edges; i++)
{
fscanf(fp,"%d",&s);
fscanf(fp,"%d",&d);
edge_t e;
e.src=s;
e.dest=d;
outEdges[s].push_back(e);
}
printf("Finished file reading.\n");
printf("\nBegin interval construction...\n");
// Construction of intervals
gettimeofday(&t1,NULL);
int num_intervals = 0, add = 1;
vector<int> startInter;
prefixV[0] = outEdges[0].size();
if(prefixV[0] > MAX_EDGES_PER_SHARD)
{
startInter.push_back(0);
num_intervals++;
add = 0;
}
for(i=1; i<num_vertices; i++)
{
prefixV[i] = outEdges[i].size();
if(add==1)
prefixV[i] += prefixV[i-1];
if(prefixV[i] > MAX_EDGES_PER_SHARD)
{
startInter.push_back(i);
num_intervals++;
add = 0;
}
else
add = 1;
}
if(add==1)
{
startInter.push_back(i-1);
num_intervals++;
}
interval_t * interval = (interval_t *) malloc(num_intervals*sizeof(interval_t));
for(i=0; i<num_intervals; i++)
{
interval[i].start = (i == 0) ? 0 : (startInter[i-1]+1);
interval[i].end = startInter[i];
}
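// Worked example (illustrative): out-degrees {3,4,2,5} with
// MAX_EDGES_PER_SHARD = 6 give startInter = {1,3}, i.e. intervals [0,1] and
// [2,3]; an interval is closed at the vertex whose inclusion first exceeds
// the cap, so a shard may slightly overshoot it.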
gettimeofday(&t2,NULL);
printf("Time to construct intervals : %f sec\n",((t2.tv_sec+t2.tv_usec*1.0e-6)-(t1.tv_sec+t1.tv_usec*1.0e-6)));
printf("\nBegin shard construction...\n");
//Construction of shard
gettimeofday(&t1,NULL);
shard_t * shard = (shard_t *) malloc(num_intervals*sizeof(shard_t));
//Finding the max number of edges in a shard
// We will allocate space for that many edges to each shard to maintain consistency
int MAX_NUM_EDGES_SHARD = INT_MIN;
int MAX_NUM_VERTICES_SHARD = INT_MIN;
for(i=0; i<num_intervals; i++)
{
int t = prefixV[interval[i].end];
if(t > MAX_NUM_EDGES_SHARD)
MAX_NUM_EDGES_SHARD = t;
int q = interval[i].end-interval[i].start+1;
if(q > MAX_NUM_VERTICES_SHARD)
MAX_NUM_VERTICES_SHARD = q;
}
for(i=0; i<num_intervals; i++)
{
// first and last vertices in shard
shard[i].Vstart = interval[i].start;
shard[i].Vend = interval[i].end;
shard[i].E = prefixV[interval[i].end];
shard[i].vmap = (int *) malloc(MAX_NUM_VERTICES_SHARD*sizeof(int));
shard[i].from = (vertex_t *) malloc(MAX_NUM_EDGES_SHARD*sizeof(vertex_t));
shard[i].to = (vertex_t *) malloc(MAX_NUM_EDGES_SHARD*sizeof(vertex_t));
}
for(i=0; i<num_intervals; i++)
{
vector<edge_t> tempEdges;
for(j=interval[i].start; j<=interval[i].end; j++)
{
for(vector<edge_t>::iterator it=outEdges[j].begin(); it!=outEdges[j].end(); ++it)
tempEdges.push_back(*it);
}
//Sorting based on src vertex to align the edges such that the access of vertices[src] is sequential
sort(tempEdges.begin(),tempEdges.end(),cost);
vector< vector<edge_t> > bucket(MAX_NUM_VERTICES_SHARD);
for (vector<edge_t>::iterator it = tempEdges.begin() ; it != tempEdges.end(); ++it)
{
bucket[(*it).src-interval[i].start].push_back(*it);
}
for(j=0;j<MAX_NUM_VERTICES_SHARD;j++)
{
shard[i].vmap[j] = bucket[j].size();
}
for(j=1;j<MAX_NUM_VERTICES_SHARD;j++)
{
shard[i].vmap[j] += shard[i].vmap[j-1];
}
k=0;
for(j=0;j<MAX_NUM_VERTICES_SHARD;j++)
{
for (vector<edge_t>::iterator it = bucket[j].begin() ; it != bucket[j].end(); ++it)
{
shard[i].from[k].val = (*it).src;
shard[i].to[k].val = (*it).dest;
k++;
}
}
}
gettimeofday(&t2,NULL);
printf("Time to construct shards : %f sec\n",((t2.tv_sec+t2.tv_usec*1.0e-6)-(t1.tv_sec+t1.tv_usec*1.0e-6)));
hipStream_t * str;
hipEvent_t * start;
hipEvent_t * stop;
int num_evts=2;
str = (hipStream_t *) malloc(num_evts * sizeof(hipStream_t));
start = (hipEvent_t *) malloc(num_evts * sizeof(hipEvent_t));
stop = (hipEvent_t *) malloc(num_evts * sizeof(hipEvent_t));
for(int i = 0; i < num_evts; i++)
{
CUDA_SAFE_CALL(hipStreamCreate(&(str[i])));
CUDA_SAFE_CALL(hipEventCreate(&(start[i])));
CUDA_SAFE_CALL(hipEventCreate(&(stop[i])));
}
// It will contain the visited status of each vertex
vertex_t *vertices;
//CUDA_SAFE_CALL(hipHostMalloc((void **)&vertices, num_vertices*sizeof(vertex_t)));
vertex_t *vertices_host = (vertex_t *) malloc(num_vertices*sizeof(vertex_t));
CUDA_SAFE_CALL(hipMalloc((void **)&vertices, num_vertices*sizeof(vertex_t)));
hipLaunchKernelGGL(( init), dim3(((num_vertices+MAX_THREADS_PER_BLOCK-1)/MAX_THREADS_PER_BLOCK)),dim3(MAX_THREADS_PER_BLOCK), 0, 0, vertices, 0, num_vertices);
float * diff = (float *) malloc(num_intervals*sizeof(float));
double time = 0;
// For vertex centric algo
shard_t * shard_dev;
int * vmap_dev;
vertex_t * from_dev;
vertex_t * to_dev;
CUDA_SAFE_CALL(hipMalloc((void **)&shard_dev, sizeof(shard_t)));
CUDA_SAFE_CALL(hipMalloc((void **)&vmap_dev, MAX_NUM_VERTICES_SHARD*sizeof(int)));
CUDA_SAFE_CALL(hipMalloc((void **)&from_dev, MAX_NUM_EDGES_SHARD*sizeof(vertex_t)));
CUDA_SAFE_CALL(hipMalloc((void **)&to_dev, MAX_NUM_EDGES_SHARD*sizeof(vertex_t)));
//Extra buffer for double buffering
shard_t * shard_dev2;
int * vmap_dev2;
vertex_t * from_dev2;
vertex_t * to_dev2;
CUDA_SAFE_CALL(hipMalloc((void **)&shard_dev2, sizeof(shard_t)));
CUDA_SAFE_CALL(hipMalloc((void **)&vmap_dev2, MAX_NUM_VERTICES_SHARD*sizeof(int)));
CUDA_SAFE_CALL(hipMalloc((void **)&from_dev2, MAX_NUM_EDGES_SHARD*sizeof(vertex_t)));
CUDA_SAFE_CALL(hipMalloc((void **)&to_dev2, MAX_NUM_EDGES_SHARD*sizeof(vertex_t)));
int num_of_blocks = 1;
//int MAX_THREADS = MAX_NUM_VERTICES_SHARD;
int MAX_THREADS = MAX_NUM_EDGES_SHARD;
int num_of_threads_per_block = MAX_THREADS;
if(MAX_THREADS>MAX_THREADS_PER_BLOCK)
{
num_of_blocks = (int)ceil(MAX_THREADS/(double)MAX_THREADS_PER_BLOCK);
num_of_threads_per_block = MAX_THREADS_PER_BLOCK;
}
dim3 grid( num_of_blocks, 1, 1);
dim3 threads( num_of_threads_per_block, 1, 1);
printf("Begin kernel\n");
int pingpong;
bool over;
k=0;
do
{
over = false;
CUDA_SAFE_CALL(hipMemcpyToSymbol(d_over, &over, sizeof(bool),0, hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipDeviceSynchronize());
pingpong=0;
for(i=0; i<num_intervals; i++)
{
if(pingpong==0)
{
//Copy Ping
CUDA_SAFE_CALL(hipMemcpyAsync(shard_dev, &shard[i], sizeof(shard_t),hipMemcpyHostToDevice,str[0]));
CUDA_SAFE_CALL(hipMemcpyAsync(vmap_dev, shard[i].vmap, MAX_NUM_VERTICES_SHARD*sizeof(int),hipMemcpyHostToDevice,str[0]));
CUDA_SAFE_CALL(hipMemcpyAsync(from_dev, shard[i].from, MAX_NUM_EDGES_SHARD*sizeof(vertex_t),hipMemcpyHostToDevice,str[0]));
CUDA_SAFE_CALL(hipMemcpyAsync(to_dev, shard[i].to, MAX_NUM_EDGES_SHARD*sizeof(vertex_t),hipMemcpyHostToDevice,str[0]));
CUDA_SAFE_CALL(hipMemcpyAsync(&(shard_dev->vmap), &vmap_dev, sizeof(int *),hipMemcpyHostToDevice,str[0]));
CUDA_SAFE_CALL(hipMemcpyAsync(&(shard_dev->from), &from_dev, sizeof(vertex_t *),hipMemcpyHostToDevice,str[0]));
CUDA_SAFE_CALL(hipMemcpyAsync(&(shard_dev->to), &to_dev, sizeof(vertex_t *),hipMemcpyHostToDevice,str[0]));
if(i>0)
{
//Process Pong
CUDA_SAFE_CALL(hipEventRecord(start[1],str[1]));
hipLaunchKernelGGL(( scatter_bfs_edge), dim3(grid), dim3(threads),0,str[1], shard_dev2, vertices, k);
CUDA_SAFE_CALL(hipStreamSynchronize(str[1]));
CUDA_SAFE_CALL(hipEventRecord(stop[1],str[1]));
CUDA_SAFE_CALL(hipEventSynchronize(stop[1]));
CUDA_SAFE_CALL(hipEventElapsedTime(&diff[i-1],start[1],stop[1]));
}
pingpong=1;
}
else
{
//Copy Pong
CUDA_SAFE_CALL(hipMemcpyAsync(shard_dev2, &shard[i], sizeof(shard_t),hipMemcpyHostToDevice,str[1]));
CUDA_SAFE_CALL(hipMemcpyAsync(vmap_dev2, shard[i].vmap, MAX_NUM_VERTICES_SHARD*sizeof(int),hipMemcpyHostToDevice,str[1]));
CUDA_SAFE_CALL(hipMemcpyAsync(from_dev2, shard[i].from, MAX_NUM_EDGES_SHARD*sizeof(vertex_t),hipMemcpyHostToDevice,str[1]));
CUDA_SAFE_CALL(hipMemcpyAsync(to_dev2, shard[i].to, MAX_NUM_EDGES_SHARD*sizeof(vertex_t),hipMemcpyHostToDevice,str[1]));
CUDA_SAFE_CALL(hipMemcpyAsync(&(shard_dev2->vmap), &vmap_dev2, sizeof(int *),hipMemcpyHostToDevice,str[1]));
CUDA_SAFE_CALL(hipMemcpyAsync(&(shard_dev2->from), &from_dev2, sizeof(vertex_t *),hipMemcpyHostToDevice,str[1]));
CUDA_SAFE_CALL(hipMemcpyAsync(&(shard_dev2->to), &to_dev2, sizeof(vertex_t *),hipMemcpyHostToDevice,str[1]));
//Process Ping
CUDA_SAFE_CALL(hipEventRecord(start[0],str[0]));
hipLaunchKernelGGL(( scatter_bfs_edge), dim3(grid), dim3(threads),0,str[0], shard_dev, vertices, k);
CUDA_SAFE_CALL(hipStreamSynchronize(str[0]));
CUDA_SAFE_CALL(hipEventRecord(stop[0],str[0]));
CUDA_SAFE_CALL(hipEventSynchronize(stop[0]));
CUDA_SAFE_CALL(hipEventElapsedTime(&diff[i-1],start[0],stop[0]));
pingpong=0;
}
}
if(pingpong==0)
{
//Process Pong
CUDA_SAFE_CALL(hipEventRecord(start[1],str[1]));
hipLaunchKernelGGL(( scatter_bfs_edge), dim3(grid), dim3(threads),0,str[1], shard_dev2, vertices, k);
CUDA_SAFE_CALL(hipStreamSynchronize(str[1]));
CUDA_SAFE_CALL(hipEventRecord(stop[1],str[1]));
CUDA_SAFE_CALL(hipEventSynchronize(stop[1]));
CUDA_SAFE_CALL(hipEventElapsedTime(&diff[i-1],start[1],stop[1]));
}
else
{
//Process Ping
CUDA_SAFE_CALL(hipEventRecord(start[0],str[0]));
hipLaunchKernelGGL(( scatter_bfs_edge), dim3(grid), dim3(threads),0,str[0], shard_dev, vertices, k);
CUDA_SAFE_CALL(hipStreamSynchronize(str[0]));
CUDA_SAFE_CALL(hipEventRecord(stop[0],str[0]));
CUDA_SAFE_CALL(hipEventSynchronize(stop[0]));
CUDA_SAFE_CALL(hipEventElapsedTime(&diff[i-1],start[0],stop[0])); // measure the events recorded on stream 0 (the original read stream 1's events here)
}
for(i=0;i<num_intervals;i++)
time += diff[i];
CUDA_SAFE_CALL(hipMemcpyFromSymbol(&over, d_over, sizeof(bool),0, hipMemcpyDeviceToHost));
k++;
}while(over);
printf("Number of iterations : %d\n",k);
/* CUDA_SAFE_CALL(hipMemcpy(vertices_host, vertices, num_vertices*sizeof(vertex_t), hipMemcpyDeviceToHost));
for(int i = 0; i < num_vertices; i++)
{
printf("Vertex %d Distance %d\n",i,vertices_host[i].val);
}
*/
printf("Time: %f ms\n",time);
for(int i = 0; i < num_evts; i++)
{
CUDA_SAFE_CALL(hipStreamDestroy(str[i]));
CUDA_SAFE_CALL(hipEventDestroy(start[i]));
CUDA_SAFE_CALL(hipEventDestroy(stop[i]));
}
free(interval);
for(i=0; i<num_intervals; i++)
{
free(shard[i].vmap);
free(shard[i].from);
free(shard[i].to);
}
free(shard);
free(vertices_host);
CUDA_SAFE_CALL(hipFree(vertices));
CUDA_SAFE_CALL(hipFree(vmap_dev));
CUDA_SAFE_CALL(hipFree(from_dev));
CUDA_SAFE_CALL(hipFree(to_dev));
CUDA_SAFE_CALL(hipFree(shard_dev));
CUDA_SAFE_CALL(hipFree(vmap_dev2));
CUDA_SAFE_CALL(hipFree(from_dev2));
CUDA_SAFE_CALL(hipFree(to_dev2));
CUDA_SAFE_CALL(hipFree(shard_dev2));
return 0;
}
|
c520629d90f59e6dcd60761ccd10724af8232b3c.cu
|
#include <iostream>
#include <vector>
#include <set>
#include <map>
#include <algorithm>
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include <climits> // for INT_MIN used below
using namespace std;
#define CUDA_SAFE_CALL( err ) (safe_call(err, __LINE__))
#define MAX_THREADS_PER_BLOCK 1024
#define GLOBAL_MAX_EDGES_PER_SHARD 33554432
void safe_call(cudaError_t ret, int line)
{
if(ret!=cudaSuccess)
{
printf("Error at line %d : %s\n",line,cudaGetErrorString(ret));
exit(-1);
}
}
typedef struct __interval
{
int start;
int end;
} interval_t;
typedef struct __edge
{
int src;
int dest;
int val;
} edge_t;
typedef struct __vertex
{
int val;
} vertex_t;
typedef struct __shard
{
int E;
int Vstart;
int Vend;
int * vmap;
vertex_t * from;
vertex_t * to;
} shard_t;
__device__ bool d_over;
__global__ void reset()
{
d_over = false;
}
__global__ void init(vertex_t * vertices, int starting_vertex, int num_vertices)
{
int v = blockDim.x*blockIdx.x + threadIdx.x;
if (v==starting_vertex)
vertices[v].val = 0;
else if(v < num_vertices)
vertices[v].val = -1;
}
/*__global__ void gather_bfs(shard_t * shard, vertex_t * vertices, int current_depth)
{
int id = blockDim.x*blockIdx.x + threadIdx.x;
if(id < shard->E)
{
if(shard->edges[id].val == (current_depth+1))
{
int t=shard->edges[id].dest;
if(vertices[t].val == -1)
{
vertices[t].val = current_depth+1;
d_over = true;
}
}
}
}*/
__global__ void scatter_bfs_edge(const shard_t * shard, vertex_t * vertices, int current_depth)
{
int id = blockDim.x*blockIdx.x + threadIdx.x;
if(id < shard->E)
{
int s=shard->from[id].val;
int t=vertices[s].val;
if(t==current_depth)
{
int u=shard->to[id].val;
if(vertices[u].val == -1)
{
vertices[u].val = t+1;
d_over = true;
}
}
}
}
__global__ void scatter_bfs_vertex(const shard_t * shard, vertex_t * vertices, int current_depth)
{
int id = blockDim.x*blockIdx.x + threadIdx.x;
int vid = id + shard->Vstart;
if(vid <= shard->Vend)
{
if(vertices[vid].val == current_depth)
{
int i;
if(id == 0)
i = 0;
else
i = shard->vmap[id-1];
for(; i < shard->vmap[id]; i++)
{
if(vertices[shard->to[i].val].val == -1)
{
vertices[shard->to[i].val].val = current_depth+1;
d_over = true;
}
}
}
}
}
bool cost(const edge_t &a, const edge_t &b)
{
return ((a.src < b.src) || (a.src == b.src && a.dest < b.dest));
}
int main(int argc, char * argv[])
{
struct timeval t1,t2;
static char * filename;
if(argc!=2)
{
printf("./a.out <filename>\n");
exit(-1);
}
else
{
filename = argv[1];
}
FILE * fp = fopen(filename,"r");
if(!fp)
{
printf("Error reading file.\n");
exit(-1);
}
/* Set cuda device to K40 */
CUDA_SAFE_CALL(cudaSetDevice(0));
printf("Begin file reading...\n");
/* Get graph from file into CPU memory */
int num_vertices, num_edges, i, j, k;
fscanf(fp,"%d %d",&num_vertices,&num_edges);
//We are always going to have at least 2 shards to enable double buffering
int ns = num_edges / GLOBAL_MAX_EDGES_PER_SHARD;
int MAX_EDGES_PER_SHARD = (ns == 0) ? (num_edges + 1)/2 : (num_edges + 1)/(ns + 1); //We do this to balance the number of edges across the shards
//Array of vectors. vector i contains the out edges of vertex i
vector< vector<edge_t> > outEdges(num_vertices);
int * prefixV = (int *) calloc(num_vertices,sizeof(int));
int s,d,v;
// In Graphchi case, I am storing the source depth in each edge
// In X-stream case, I am storing the destination depth in each edge
for(i=0; i<num_edges; i++)
{
fscanf(fp,"%d",&s);
fscanf(fp,"%d",&d);
edge_t e;
e.src=s;
e.dest=d;
outEdges[s].push_back(e);
}
printf("Finished file reading.\n");
printf("\nBegin interval construction...\n");
// Construction of intervals
gettimeofday(&t1,NULL);
int num_intervals = 0, add = 1;
vector<int> startInter;
prefixV[0] = outEdges[0].size();
if(prefixV[0] > MAX_EDGES_PER_SHARD)
{
startInter.push_back(0);
num_intervals++;
add = 0;
}
for(i=1; i<num_vertices; i++)
{
prefixV[i] = outEdges[i].size();
if(add==1)
prefixV[i] += prefixV[i-1];
if(prefixV[i] > MAX_EDGES_PER_SHARD)
{
startInter.push_back(i);
num_intervals++;
add = 0;
}
else
add = 1;
}
if(add==1)
{
startInter.push_back(i-1);
num_intervals++;
}
interval_t * interval = (interval_t *) malloc(num_intervals*sizeof(interval_t));
for(i=0; i<num_intervals; i++)
{
interval[i].start = (i == 0) ? 0 : (startInter[i-1]+1);
interval[i].end = startInter[i];
}
gettimeofday(&t2,NULL);
printf("Time to construct intervals : %f sec\n",((t2.tv_sec+t2.tv_usec*1.0e-6)-(t1.tv_sec+t1.tv_usec*1.0e-6)));
printf("\nBegin shard construction...\n");
//Construction of shard
gettimeofday(&t1,NULL);
shard_t * shard = (shard_t *) malloc(num_intervals*sizeof(shard_t));
//Finding the max number of edges in a shard
// We will allocate space for that many edges to each shard to maintain consistency
int MAX_NUM_EDGES_SHARD = INT_MIN;
int MAX_NUM_VERTICES_SHARD = INT_MIN;
for(i=0; i<num_intervals; i++)
{
int t = prefixV[interval[i].end];
if(t > MAX_NUM_EDGES_SHARD)
MAX_NUM_EDGES_SHARD = t;
int q = interval[i].end-interval[i].start+1;
if(q > MAX_NUM_VERTICES_SHARD)
MAX_NUM_VERTICES_SHARD = q;
}
for(i=0; i<num_intervals; i++)
{
// first and last vertices in shard
shard[i].Vstart = interval[i].start;
shard[i].Vend = interval[i].end;
shard[i].E = prefixV[interval[i].end];
shard[i].vmap = (int *) malloc(MAX_NUM_VERTICES_SHARD*sizeof(int));
shard[i].from = (vertex_t *) malloc(MAX_NUM_EDGES_SHARD*sizeof(vertex_t));
shard[i].to = (vertex_t *) malloc(MAX_NUM_EDGES_SHARD*sizeof(vertex_t));
}
for(i=0; i<num_intervals; i++)
{
vector<edge_t> tempEdges;
for(j=interval[i].start; j<=interval[i].end; j++)
{
for(vector<edge_t>::iterator it=outEdges[j].begin(); it!=outEdges[j].end(); ++it)
tempEdges.push_back(*it);
}
//Sorting based on src vertex to align the edges such that the access of vertices[src] is sequential
sort(tempEdges.begin(),tempEdges.end(),cost);
vector< vector<edge_t> > bucket(MAX_NUM_VERTICES_SHARD);
for (vector<edge_t>::iterator it = tempEdges.begin() ; it != tempEdges.end(); ++it)
{
bucket[(*it).src-interval[i].start].push_back(*it);
}
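// bucket[j] gathers the edges whose source is the j-th vertex of this
// interval; vmap then stores the inclusive prefix sum of bucket sizes, i.e.
// the per-vertex edge offsets that the vertex-centric kernel walks.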
for(j=0;j<MAX_NUM_VERTICES_SHARD;j++)
{
shard[i].vmap[j] = bucket[j].size();
}
for(j=1;j<MAX_NUM_VERTICES_SHARD;j++)
{
shard[i].vmap[j] += shard[i].vmap[j-1];
}
k=0;
for(j=0;j<MAX_NUM_VERTICES_SHARD;j++)
{
for (vector<edge_t>::iterator it = bucket[j].begin() ; it != bucket[j].end(); ++it)
{
shard[i].from[k].val = (*it).src;
shard[i].to[k].val = (*it).dest;
k++;
}
}
}
gettimeofday(&t2,NULL);
printf("Time to construct shards : %f sec\n",((t2.tv_sec+t2.tv_usec*1.0e-6)-(t1.tv_sec+t1.tv_usec*1.0e-6)));
cudaStream_t * str;
cudaEvent_t * start;
cudaEvent_t * stop;
int num_evts=2;
str = (cudaStream_t *) malloc(num_evts * sizeof(cudaStream_t));
start = (cudaEvent_t *) malloc(num_evts * sizeof(cudaEvent_t));
stop = (cudaEvent_t *) malloc(num_evts * sizeof(cudaEvent_t));
for(int i = 0; i < num_evts; i++)
{
CUDA_SAFE_CALL(cudaStreamCreate(&(str[i])));
CUDA_SAFE_CALL(cudaEventCreate(&(start[i])));
CUDA_SAFE_CALL(cudaEventCreate(&(stop[i])));
}
// It will contain the visited status of each vertex
vertex_t *vertices;
//CUDA_SAFE_CALL(cudaMallocHost((void **)&vertices, num_vertices*sizeof(vertex_t)));
vertex_t *vertices_host = (vertex_t *) malloc(num_vertices*sizeof(vertex_t));
CUDA_SAFE_CALL(cudaMalloc((void **)&vertices, num_vertices*sizeof(vertex_t)));
init<<<((num_vertices+MAX_THREADS_PER_BLOCK-1)/MAX_THREADS_PER_BLOCK),MAX_THREADS_PER_BLOCK>>> (vertices, 0, num_vertices);
float * diff = (float *) malloc(num_intervals*sizeof(float));
double time = 0;
// For vertex centric algo
shard_t * shard_dev;
int * vmap_dev;
vertex_t * from_dev;
vertex_t * to_dev;
CUDA_SAFE_CALL(cudaMalloc((void **)&shard_dev, sizeof(shard_t)));
CUDA_SAFE_CALL(cudaMalloc((void **)&vmap_dev, MAX_NUM_VERTICES_SHARD*sizeof(int)));
CUDA_SAFE_CALL(cudaMalloc((void **)&from_dev, MAX_NUM_EDGES_SHARD*sizeof(vertex_t)));
CUDA_SAFE_CALL(cudaMalloc((void **)&to_dev, MAX_NUM_EDGES_SHARD*sizeof(vertex_t)));
//Extra buffer for double buffering
shard_t * shard_dev2;
int * vmap_dev2;
vertex_t * from_dev2;
vertex_t * to_dev2;
CUDA_SAFE_CALL(cudaMalloc((void **)&shard_dev2, sizeof(shard_t)));
CUDA_SAFE_CALL(cudaMalloc((void **)&vmap_dev2, MAX_NUM_VERTICES_SHARD*sizeof(int)));
CUDA_SAFE_CALL(cudaMalloc((void **)&from_dev2, MAX_NUM_EDGES_SHARD*sizeof(vertex_t)));
CUDA_SAFE_CALL(cudaMalloc((void **)&to_dev2, MAX_NUM_EDGES_SHARD*sizeof(vertex_t)));
int num_of_blocks = 1;
//int MAX_THREADS = MAX_NUM_VERTICES_SHARD;
int MAX_THREADS = MAX_NUM_EDGES_SHARD;
int num_of_threads_per_block = MAX_THREADS;
if(MAX_THREADS>MAX_THREADS_PER_BLOCK)
{
num_of_blocks = (int)ceil(MAX_THREADS/(double)MAX_THREADS_PER_BLOCK);
num_of_threads_per_block = MAX_THREADS_PER_BLOCK;
}
dim3 grid( num_of_blocks, 1, 1);
dim3 threads( num_of_threads_per_block, 1, 1);
printf("Begin kernel\n");
int pingpong;
bool over;
k=0;
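// Main BFS loop with software pipelining: while shard i is copied
// host-to-device on one stream (Ping or Pong buffer), the kernel for shard
// i-1 runs on the other stream, overlapping transfer with compute. k tracks
// the current BFS depth; d_over flags whether any vertex was relaxed.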
do
{
over = false;
CUDA_SAFE_CALL(cudaMemcpyToSymbol(d_over, &over, sizeof(bool),0, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaDeviceSynchronize());
pingpong=0;
for(i=0; i<num_intervals; i++)
{
if(pingpong==0)
{
//Copy Ping
CUDA_SAFE_CALL(cudaMemcpyAsync(shard_dev, &shard[i], sizeof(shard_t),cudaMemcpyHostToDevice,str[0]));
CUDA_SAFE_CALL(cudaMemcpyAsync(vmap_dev, shard[i].vmap, MAX_NUM_VERTICES_SHARD*sizeof(int),cudaMemcpyHostToDevice,str[0]));
CUDA_SAFE_CALL(cudaMemcpyAsync(from_dev, shard[i].from, MAX_NUM_EDGES_SHARD*sizeof(vertex_t),cudaMemcpyHostToDevice,str[0]));
CUDA_SAFE_CALL(cudaMemcpyAsync(to_dev, shard[i].to, MAX_NUM_EDGES_SHARD*sizeof(vertex_t),cudaMemcpyHostToDevice,str[0]));
CUDA_SAFE_CALL(cudaMemcpyAsync(&(shard_dev->vmap), &vmap_dev, sizeof(int *),cudaMemcpyHostToDevice,str[0]));
CUDA_SAFE_CALL(cudaMemcpyAsync(&(shard_dev->from), &from_dev, sizeof(vertex_t *),cudaMemcpyHostToDevice,str[0]));
CUDA_SAFE_CALL(cudaMemcpyAsync(&(shard_dev->to), &to_dev, sizeof(vertex_t *),cudaMemcpyHostToDevice,str[0]));
if(i>0)
{
//Process Pong
CUDA_SAFE_CALL(cudaEventRecord(start[1],str[1]));
scatter_bfs_edge<<<grid, threads,0,str[1]>>> (shard_dev2, vertices, k);
CUDA_SAFE_CALL(cudaStreamSynchronize(str[1]));
CUDA_SAFE_CALL(cudaEventRecord(stop[1],str[1]));
CUDA_SAFE_CALL(cudaEventSynchronize(stop[1]));
CUDA_SAFE_CALL(cudaEventElapsedTime(&diff[i-1],start[1],stop[1]));
}
pingpong=1;
}
else
{
//Copy Pong
CUDA_SAFE_CALL(cudaMemcpyAsync(shard_dev2, &shard[i], sizeof(shard_t),cudaMemcpyHostToDevice,str[1]));
CUDA_SAFE_CALL(cudaMemcpyAsync(vmap_dev2, shard[i].vmap, MAX_NUM_VERTICES_SHARD*sizeof(int),cudaMemcpyHostToDevice,str[1]));
CUDA_SAFE_CALL(cudaMemcpyAsync(from_dev2, shard[i].from, MAX_NUM_EDGES_SHARD*sizeof(vertex_t),cudaMemcpyHostToDevice,str[1]));
CUDA_SAFE_CALL(cudaMemcpyAsync(to_dev2, shard[i].to, MAX_NUM_EDGES_SHARD*sizeof(vertex_t),cudaMemcpyHostToDevice,str[1]));
CUDA_SAFE_CALL(cudaMemcpyAsync(&(shard_dev2->vmap), &vmap_dev2, sizeof(int *),cudaMemcpyHostToDevice,str[1]));
CUDA_SAFE_CALL(cudaMemcpyAsync(&(shard_dev2->from), &from_dev2, sizeof(vertex_t *),cudaMemcpyHostToDevice,str[1]));
CUDA_SAFE_CALL(cudaMemcpyAsync(&(shard_dev2->to), &to_dev2, sizeof(vertex_t *),cudaMemcpyHostToDevice,str[1]));
//Process Ping
CUDA_SAFE_CALL(cudaEventRecord(start[0],str[0]));
scatter_bfs_edge<<<grid, threads,0,str[0]>>> (shard_dev, vertices, k);
CUDA_SAFE_CALL(cudaStreamSynchronize(str[0]));
CUDA_SAFE_CALL(cudaEventRecord(stop[0],str[0]));
CUDA_SAFE_CALL(cudaEventSynchronize(stop[0]));
CUDA_SAFE_CALL(cudaEventElapsedTime(&diff[i-1],start[0],stop[0]));
pingpong=0;
}
}
if(pingpong==0)
{
//Process Pong
CUDA_SAFE_CALL(cudaEventRecord(start[1],str[1]));
scatter_bfs_edge<<<grid, threads,0,str[1]>>> (shard_dev2, vertices, k);
CUDA_SAFE_CALL(cudaStreamSynchronize(str[1]));
CUDA_SAFE_CALL(cudaEventRecord(stop[1],str[1]));
CUDA_SAFE_CALL(cudaEventSynchronize(stop[1]));
CUDA_SAFE_CALL(cudaEventElapsedTime(&diff[i-1],start[1],stop[1]));
}
else
{
//Process Ping
CUDA_SAFE_CALL(cudaEventRecord(start[0],str[0]));
scatter_bfs_edge<<<grid, threads,0,str[0]>>> (shard_dev, vertices, k);
CUDA_SAFE_CALL(cudaStreamSynchronize(str[0]));
CUDA_SAFE_CALL(cudaEventRecord(stop[0],str[0]));
CUDA_SAFE_CALL(cudaEventSynchronize(stop[0]));
CUDA_SAFE_CALL(cudaEventElapsedTime(&diff[i-1],start[0],stop[0]));
}
for(i=0;i<num_intervals;i++)
time += diff[i];
CUDA_SAFE_CALL(cudaMemcpyFromSymbol(&over, d_over, sizeof(bool),0, cudaMemcpyDeviceToHost));
k++;
}while(over);
printf("Number of iterations : %d\n",k);
/* CUDA_SAFE_CALL(cudaMemcpy(vertices_host, vertices, num_vertices*sizeof(vertex_t), cudaMemcpyDeviceToHost));
for(int i = 0; i < num_vertices; i++)
{
printf("Vertex %d Distance %d\n",i,vertices_host[i].val);
}
*/
printf("Time: %f ms\n",time);
for(int i = 0; i < num_evts; i++)
{
CUDA_SAFE_CALL(cudaStreamDestroy(str[i]));
CUDA_SAFE_CALL(cudaEventDestroy(start[i]));
CUDA_SAFE_CALL(cudaEventDestroy(stop[i]));
}
free(interval);
for(i=0; i<num_intervals; i++)
{
free(shard[i].vmap);
free(shard[i].from);
free(shard[i].to);
}
free(shard);
free(vertices_host);
CUDA_SAFE_CALL(cudaFree(vertices));
CUDA_SAFE_CALL(cudaFree(vmap_dev));
CUDA_SAFE_CALL(cudaFree(from_dev));
CUDA_SAFE_CALL(cudaFree(to_dev));
CUDA_SAFE_CALL(cudaFree(shard_dev));
CUDA_SAFE_CALL(cudaFree(vmap_dev2));
CUDA_SAFE_CALL(cudaFree(from_dev2));
CUDA_SAFE_CALL(cudaFree(to_dev2));
CUDA_SAFE_CALL(cudaFree(shard_dev2));
return 0;
}
|
d05ff6f2ee8ace0269a6c2a3e93d52a392d1e888.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Author: Vanessa Aguiar-Pulido
* Postdoctoral Research Associate
* Bioinformatics Research Group (BioRG)
* Florida International University (FIU)
* Miami, FL, USA
*
* Contact: [email protected] or [email protected]
*/
#include "CScore.h"
#include <sys/time.h>
#include "scoreReads.h"
#include <math.h>
CScore::CScore() {
}
CScore::~CScore() {
}
float CScore::scoreRead(string read, int order, map<string, int> kmerMap, vector<float> model) {
float score = 0.0;
bool first = true;
int mappedIndex = -1;
string tmpKmer = read.substr(0, order); //From the beginning, take k characters;
//We need to calculate the initial probabilities
while(first) {
try {
mappedIndex = kmerMap.at(tmpKmer);
score += model.at(mappedIndex);
first = false;
} catch (...) { //If there is an N in the initial k-mer, iterate past it
tmpKmer.erase(tmpKmer.begin());
tmpKmer.push_back((char)read.at(order));
order++;
}
}
//cout << "tmpKmer " << order << ": " << tmpKmer << "\n";
tmpKmer.push_back((char)read.at(order)); //First (k+1)-mer
for(int j=order+1; j<read.length(); j++) { //Calculate the score of a read
//cout << "tmpKmer " << j << ": " << tmpKmer << "\n";
try {
mappedIndex = kmerMap.at(tmpKmer); //This will return the position of the kmer
score += model.at(mappedIndex);
//cout << "Partial score "<< j << ": " << score << "\n";
} catch (...) {} //If there's an N, just skip it
tmpKmer.erase(tmpKmer.begin());
tmpKmer.push_back((char)read.at(j));
}
//cout << "tmpKmer " << read.length() << ": " << tmpKmer << "\n";
//We need to add the last kmer
try {
mappedIndex = kmerMap.at(tmpKmer);
score += model.at(mappedIndex);
} catch (...) {} //If there's an N, just skip it
//cout << "Score for read "<< read << ": " << score << "\n";
return score;
}
void CScore::scoreModels(string modelsPath, string readsFileName, string outputFile, int order) {
ifstream listFile, modelFile;
ofstream scoreResults;
string modelName="", modelFull="";
vector<float> model;
float value = 0.0;
int index=-1;//, mappedIndex=-1;
string tmpKmer="", tmpRead="";
//float tmpScore=0.0;
//Prepare to load the reads
CSequences* reads = new CSequences(readsFileName);
// GPU arrays
int num_seq = reads->getSequences().size();
int read_length = reads->getSequences()[0].size(); // TMC for now assuming same length
int nucleotides = num_seq*read_length;
float* cpu_scores = (float*) malloc(num_seq*sizeof(float));
char* cpu_genome = (char*) malloc(nucleotides*sizeof(char));
// data() does not work, have to copy manually for now.
for (int i = 0; i < num_seq; i++) {
cout << "Sequence " << i << ": " << reads->getSequences()[i] << endl;
for (int j = 0; j < read_length; j++) {
cpu_genome[i*read_length+j] = reads->getSequences()[i][j];
}
}
cout << "CPU GENOME: " << string(cpu_genome) << endl;
char* gpu_genome;
hipMalloc((void**) &gpu_genome, nucleotides*sizeof(char));
hipMemcpy(gpu_genome, cpu_genome, nucleotides*sizeof(char), hipMemcpyHostToDevice); // TMC I know this works for vectors of ints, need to check vectors of strings
//Prepare to get the list of possible kmers for a model
CKmers* kmers = new CKmers(order);
order++;
//Get the full list of models
if(modelsPath.compare(modelsPath.length()-1,1,"/") !=0) {
modelsPath += "/";
}
string command = "ls "+ modelsPath +" > models.txt";
system(command.c_str());
//Open the file containing the names of the models
listFile.open("models.txt");
//cout << "Let's open the models file\n";
if (listFile.is_open()) {
while(getline(listFile,modelName)) { //Retrieve the name of the model
modelFull = modelsPath + modelName;
//cout << "ModelFull: " << modelFull << "\n";
modelFile.open(modelFull.c_str()); //Open the file that contains the probabilities
if (modelFile.is_open()) {
try { //In case there's something in the model's folder that shouldn't be there
int num_models = pow(4,order) + pow(4, order-1); //entries per model file: 4^(k+1) transition probs plus 4^k initial probs (order is already k+1 here)
float* cpu_model = (float*) malloc(num_models * sizeof(float));
cout << "Model: " << modelName << "\n";
int i=0;
while(i < num_models && modelFile >> index >> value) {
cpu_model[i] = value;
//model.push_back(value); //Store the model values
//cout << "Model value: " << value << "\n";
i++;
}
//cout << "Model size: " << model.size() << "\n";
//cout << "First element: " << model.data()[0] << "\n";
float* gpu_model;
hipMalloc((void**) &gpu_model, num_models*sizeof(float));
hipMemcpy(gpu_model, cpu_model, num_models*sizeof(float), hipMemcpyHostToDevice);
float* gpu_scores;
hipMalloc((void**) &gpu_scores, num_seq*sizeof(float));
//For each read calculate the score for the model
// Call with num_seq blocks of order threads.
int num_kmers = read_length - order + 1 + 1;
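// Launch geometry: one block per read, one thread per (k+1)-mer window plus
// one extra slot (hence the trailing +1), with num_kmers floats of dynamic
// shared memory -- presumably one partial score per thread, reduced inside
// scoreReads (its body lives in scoreReads.h and is not shown here).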
//printf("Calling kernel.\n");
//cout << "Using " << num_seq << " blocks of " << num_kmers << " threads. Shared memory contains " << num_kmers << " floats." << endl;
hipLaunchKernelGGL(( scoreReads), dim3(num_seq), dim3(num_kmers), num_kmers*sizeof(float), 0, gpu_genome, read_length, order, gpu_model, gpu_scores);
hipDeviceSynchronize();
//printf("Called kernel.\n");
hipError_t error = hipGetLastError();
if(error != hipSuccess)
{
// print the CUDA error message and exit
printf("CUDA error: %s\n", hipGetErrorString(error));
exit(-1);
}
hipMemcpy(cpu_scores, gpu_scores, num_seq*sizeof(float), hipMemcpyDeviceToHost);
//if (cpu_scores[0] != cpu_scores[1])
// cout << "Warning: Sequence 0 has " << cpu_scores[0] << " and Sequence 1 has " << cpu_scores[1] << endl;
hipFree(gpu_model);
hipFree(gpu_scores);
free(cpu_model);
for(int i=0; i<reads->getSequences().size(); i++) {
//tmpScore = this->scoreRead((string)reads->getSequences().at(i), order, kmers->getKmerList(), model);
//cout << "Score for read "<< i << ": " << cpu_scores[i] << "\n";
//Replace the score stored if the new score is higher
// TMC for now removing, since we are only doing one model
//cout << "Score for sequence " << i << " (press return to continue): " << cpu_scores[i] << endl;
//int x;cin >> x;
if(this->scores.size() < reads->getSequences().size()) {
this->scores.push_back(cpu_scores[i]);
this->modelNames.push_back((string)modelName.substr(0,modelName.find(".")));
}
else {
if (cpu_scores[i] > this->scores.at(i)) {
this->scores.at(i) = cpu_scores[i];
this->modelNames.at(i) = modelName.substr(0,modelName.find("."));
}
}
// exit(1);
} //End while scoring reads
} catch(...) {}
modelFile.close();
//cout << "Model cleared." << endl;
model.clear();
} //End if model was loaded
} //End while reading models
//Write the final scores to a file
scoreResults.open(outputFile.c_str());
if (scoreResults.is_open()) {
scoreResults << "Best score\tBest model\n";
for(int i=0; i < this->scores.size(); i++) {
scoreResults << this->scores.at(i) << "\t" << this->modelNames.at(i) << "\n";
}
scoreResults.close();
}
listFile.close();
} //End if list of models was read
free(cpu_scores);
free(cpu_genome);
hipFree(gpu_genome);
delete reads; //delete (not an explicit destructor call) so the memory is released too
delete kmers;
}
/*
int main(int argc, char* argv[]) {
//string pathToModels = "/scratch/giri_projects/vanessa/Azad/scripts/model_database/";
//string pathToModels = "/Users/vanessa/Documents/Work/ResearchInProgress/BioRG/Metagenomics/smallList/";
string pathToModels = "/Users/vanessa/Documents/Work/ResearchInProgress/BioRG/Metagenomics/signature_6order/";
CScore* s = new CScore();
struct timeval tv1, tv2;
gettimeofday(&tv1, NULL);
//for(int i=0; i<1000; i++) {
//s->scoreModels(pathToModels,"test.fa","scores.txt",6);
//}
s->scoreModels(pathToModels,"test.fa","scores.txt",6);
//s->scoreModels(pathToModels,"test.fa","scores.txt",8);
gettimeofday(&tv2, NULL);
double tm = (double) (tv2.tv_usec - tv1.tv_usec)/1000000 + (double) (tv2.tv_sec - tv1.tv_sec);
cout << "Time taken in execution = " << tm << " seconds\n";
return 0;
}
*/
|
d05ff6f2ee8ace0269a6c2a3e93d52a392d1e888.cu
|
/*
* Author: Vanessa Aguiar-Pulido
* Postdoctoral Research Associate
* Bioinformatics Research Group (BioRG)
* Florida International University (FIU)
* Miami, FL, USA
*
* Contact: [email protected] or [email protected]
*/
#include "CScore.h"
#include <sys/time.h>
#include "scoreReads.h"
#include <math.h>
CScore::CScore() {
}
CScore::~CScore() {
}
float CScore::scoreRead(string read, int order, map<string, int> kmerMap, vector<float> model) {
float score = 0.0;
bool first = true;
int mappedIndex = -1;
string tmpKmer = read.substr(0, order); //From the beginning, take k characters;
//We need to calculate the initial probabilities
while(first) {
try {
mappedIndex = kmerMap.at(tmpKmer);
score += model.at(mappedIndex);
first = false;
} catch (...) { //If there is an N in the initial k-mer, iterate past it
tmpKmer.erase(tmpKmer.begin());
tmpKmer.push_back((char)read.at(order));
order++;
}
}
//cout << "tmpKmer " << order << ": " << tmpKmer << "\n";
tmpKmer.push_back((char)read.at(order)); //First (k+1)-mer
for(int j=order+1; j<read.length(); j++) { //Calculate the score of a read
//cout << "tmpKmer " << j << ": " << tmpKmer << "\n";
try {
mappedIndex = kmerMap.at(tmpKmer); //This will return the position of the kmer
score += model.at(mappedIndex);
//cout << "Partial score "<< j << ": " << score << "\n";
} catch (...) {} //If there's an N, just skip it
tmpKmer.erase(tmpKmer.begin());
tmpKmer.push_back((char)read.at(j));
}
//cout << "tmpKmer " << read.length() << ": " << tmpKmer << "\n";
//We need to add the last kmer
try {
mappedIndex = kmerMap.at(tmpKmer);
score += model.at(mappedIndex);
} catch (...) {} //If there's an N, just skip it
//cout << "Score for read "<< read << ": " << score << "\n";
return score;
}
void CScore::scoreModels(string modelsPath, string readsFileName, string outputFile, int order) {
ifstream listFile, modelFile;
ofstream scoreResults;
string modelName="", modelFull="";
vector<float> model;
float value = 0.0;
int index=-1;//, mappedIndex=-1;
string tmpKmer="", tmpRead="";
//float tmpScore=0.0;
//Prepare to load the reads
CSequences* reads = new CSequences(readsFileName);
// GPU arrays
int num_seq = reads->getSequences().size();
int read_length = reads->getSequences()[0].size(); // TMC for now assuming same length
int nucleotides = num_seq*read_length;
float* cpu_scores = (float*) malloc(num_seq*sizeof(float));
char* cpu_genome = (char*) malloc(nucleotides*sizeof(char));
// data() does not work, have to copy manually for now.
for (int i = 0; i < num_seq; i++) {
cout << "Sequence " << i << ": " << reads->getSequences()[i] << endl;
for (int j = 0; j < read_length; j++) {
cpu_genome[i*read_length+j] = reads->getSequences()[i][j];
}
}
cout << "CPU GENOME: " << string(cpu_genome) << endl;
char* gpu_genome;
cudaMalloc((void**) &gpu_genome, nucleotides*sizeof(char));
cudaMemcpy(gpu_genome, cpu_genome, nucleotides*sizeof(char), cudaMemcpyHostToDevice); // TMC I know this works for vectors of ints, need to check vectors of strings
//Prepare to get the list of possible kmers for a model
CKmers* kmers = new CKmers(order);
order++;
//Get the full list of models
if(modelsPath.compare(modelsPath.length()-1,1,"/") !=0) {
modelsPath += "/";
}
string command = "ls "+ modelsPath +" > models.txt";
system(command.c_str());
//Open the file containing the names of the models
listFile.open("models.txt");
//cout << "Let's open the models file\n";
if (listFile.is_open()) {
while(getline(listFile,modelName)) { //Retrieve the name of the model
modelFull = modelsPath + modelName;
//cout << "ModelFull: " << modelFull << "\n";
modelFile.open(modelFull.c_str()); //Open the file that contains the probabilities
if (modelFile.is_open()) {
try { //In case there's something in the model's folder that shouldn't be there
int num_models = pow(4,order) + pow(4, order-1); //entries per model file: 4^(k+1) transition probs plus 4^k initial probs (order is already k+1 here)
float* cpu_model = (float*) malloc(num_models * sizeof(float));
cout << "Model: " << modelName << "\n";
int i=0;
while(i < num_models && modelFile >> index >> value) {
cpu_model[i] = value;
//model.push_back(value); //Store the model values
//cout << "Model value: " << value << "\n";
i++;
}
//cout << "Model size: " << model.size() << "\n";
//cout << "First element: " << model.data()[0] << "\n";
float* gpu_model;
cudaMalloc((void**) &gpu_model, num_models*sizeof(float));
cudaMemcpy(gpu_model, cpu_model, num_models*sizeof(float), cudaMemcpyHostToDevice);
float* gpu_scores;
cudaMalloc((void**) &gpu_scores, num_seq*sizeof(float));
//For each read calculate the score for the model
// Call with num_seq blocks of order threads.
int num_kmers = read_length - order + 1 + 1;
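// Launch geometry: one block per read, one thread per (k+1)-mer window plus
// one extra slot (hence the trailing +1), with num_kmers floats of dynamic
// shared memory -- presumably one partial score per thread, reduced inside
// scoreReads (its body lives in scoreReads.h and is not shown here).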
//printf("Calling kernel.\n");
//cout << "Using " << num_seq << " blocks of " << num_kmers << " threads. Shared memory contains " << num_kmers << " floats." << endl;
scoreReads<<<num_seq, num_kmers, num_kmers*sizeof(float)>>>(gpu_genome, read_length, order, gpu_model, gpu_scores);
cudaDeviceSynchronize();
//printf("Called kernel.\n");
cudaError_t error = cudaGetLastError();
if(error != cudaSuccess)
{
// print the CUDA error message and exit
printf("CUDA error: %s\n", cudaGetErrorString(error));
exit(-1);
}
cudaMemcpy(cpu_scores, gpu_scores, num_seq*sizeof(float), cudaMemcpyDeviceToHost);
//if (cpu_scores[0] != cpu_scores[1])
// cout << "Warning: Sequence 0 has " << cpu_scores[0] << " and Sequence 1 has " << cpu_scores[1] << endl;
cudaFree(gpu_model);
cudaFree(gpu_scores);
free(cpu_model);
for(int i=0; i<reads->getSequences().size(); i++) {
//tmpScore = this->scoreRead((string)reads->getSequences().at(i), order, kmers->getKmerList(), model);
//cout << "Score for read "<< i << ": " << cpu_scores[i] << "\n";
//Replace the score stored if the new score is higher
// TMC for now removing, since we are only doing one model
//cout << "Score for sequence " << i << " (press return to continue): " << cpu_scores[i] << endl;
//int x;cin >> x;
if(this->scores.size() < reads->getSequences().size()) {
this->scores.push_back(cpu_scores[i]);
this->modelNames.push_back((string)modelName.substr(0,modelName.find(".")));
}
else {
if (cpu_scores[i] > this->scores.at(i)) {
this->scores.at(i) = cpu_scores[i];
this->modelNames.at(i) = modelName.substr(0,modelName.find("."));
}
}
// exit(1);
} //End while scoring reads
} catch(...) {}
modelFile.close();
//cout << "Model cleared." << endl;
model.clear();
} //End if model was loaded
} //End while reading models
//Write the final scores to a file
scoreResults.open(outputFile.c_str());
if (scoreResults.is_open()) {
scoreResults << "Best score\tBest model\n";
for(int i=0; i < this->scores.size(); i++) {
scoreResults << this->scores.at(i) << "\t" << this->modelNames.at(i) << "\n";
}
scoreResults.close();
}
listFile.close();
} //End if list of models was read
free(cpu_scores);
free(cpu_genome);
cudaFree(gpu_genome);
delete reads; //delete (not an explicit destructor call) so the memory is released too
delete kmers;
}
/*
int main(int argc, char* argv[]) {
//string pathToModels = "/scratch/giri_projects/vanessa/Azad/scripts/model_database/";
//string pathToModels = "/Users/vanessa/Documents/Work/ResearchInProgress/BioRG/Metagenomics/smallList/";
string pathToModels = "/Users/vanessa/Documents/Work/ResearchInProgress/BioRG/Metagenomics/signature_6order/";
CScore* s = new CScore();
struct timeval tv1, tv2;
gettimeofday(&tv1, NULL);
//for(int i=0; i<1000; i++) {
//s->scoreModels(pathToModels,"test.fa","scores.txt",6);
//}
s->scoreModels(pathToModels,"test.fa","scores.txt",6);
//s->scoreModels(pathToModels,"test.fa","scores.txt",8);
gettimeofday(&tv2, NULL);
double tm = (double) (tv2.tv_usec - tv1.tv_usec)/1000000 + (double) (tv2.tv_sec - tv1.tv_sec);
cout << "Time taken in execution = " << tm << " seconds\n";
return 0;
}
*/
|
0fffe4b80668b912afb78f77e6a9a43556fb0270.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <time.h>
#include "hip/hip_runtime.h"
#include "functions.c"
//compute a*b mod p safely
__device__ unsigned int modprodcu(unsigned int a, unsigned int b, unsigned int p) {
unsigned int za = a;
unsigned int ab = 0;
while (b > 0) {
if (b%2 == 1) ab = (ab + za) % p;
za = (2 * za) % p;
b /= 2;
}
return ab;
}
//compute a^b mod p safely
__device__ unsigned int modExpcu(unsigned int a, unsigned int b, unsigned int p) {
unsigned int z = a;
unsigned int aExpb = 1;
while (b > 0) {
if (b%2 == 1) aExpb = modprodcu(aExpb, z, p);
z = modprodcu(z, z, p);
b /= 2;
}
return aExpb;
}
__global__ void search(unsigned int p, unsigned int g, unsigned int h, unsigned int* x){
unsigned int myX = (unsigned int)(threadIdx.x+blockIdx.x*blockDim.x);
unsigned int myY = (unsigned int)(threadIdx.y+blockIdx.y*blockDim.y);
//find the secret key
unsigned int i = myY*blockDim.x*gridDim.x+myX;
if(i < p) {
if (modExpcu(g,i+1,p)==h)
*x=i+1;
}
}
int main (int argc, char **argv) {
/* Part 2. Start this program by first copying the contents of the main function from
your completed decrypt.c main function. */
//declare storage for an ElGamal cryptosystem
unsigned int n, p, g, h;
unsigned int Nints;
//get the secret key from the user
//printf("Enter the secret key (0 if unknown): "); fflush(stdout);
//char stat = scanf("%u",&x);
unsigned int* h_x = (unsigned int*)malloc(sizeof(unsigned int));
*h_x = 0;
//printf("Reading file.\n");
FILE* f = fopen("bonus_public_key.txt", "r");
fscanf(f, "%u\n%u\n%u\n%u\n", &n, &p, &g, &h);
fclose(f);
f = fopen("bonus_message.txt", "r");
fscanf(f, "%u\n", &Nints);
unsigned int* Zmessage = (unsigned int*) malloc(Nints*sizeof(unsigned int));
unsigned int* a = (unsigned int*) malloc(Nints*sizeof(unsigned int));
for(int i = 0; i < Nints; i++){
fscanf(f, "%u %u\n", &Zmessage[i], &a[i]);
}
fclose(f);
//---------------------------------------------------------------------------------------------------------------
unsigned int* d_x;
hipMalloc(&d_x, sizeof(unsigned int));
dim3 B(32, 32, 1);
int N = (n-10+1)/2;
if(N < 0)
N = 0;
N = 1 << N;
dim3 G(N,N,1);
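// Launch geometry: 32x32 = 2^10 threads per block on an N x N grid with
// N = 2^((n-9)/2), so roughly 2^(n+1) threads in total -- enough to test
// every candidate key below the n-bit prime p.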
double startTime = clock();
hipLaunchKernelGGL(( search) , dim3(G),dim3(B) , 0, 0, p, g, h, d_x);
hipDeviceSynchronize();
double endTime = clock();
double totalTime = (endTime-startTime)/CLOCKS_PER_SEC;
double work = (double) p;
double throughput = work/totalTime;
printf("Searching all keys took %g seconds, throughput was %g values tested per second.\n", totalTime, throughput);
hipMemcpy(h_x,d_x,sizeof(unsigned int),hipMemcpyDeviceToHost);
printf("x=%u\n", *h_x);
hipFree(d_x);
//--------------------------------------------------------------------------------------------------------------
unsigned int Nchars = Nints*(n-1)/8;
printf("Nchars=%u\n", Nchars);
ElGamalDecrypt(Zmessage, a, Nints, p, *h_x);
unsigned char* message = (unsigned char*) malloc(Nchars*sizeof(unsigned char));
convertZToString(Zmessage, Nints, message, Nchars);
printf("Decrypted message: \"%s\"\n", message);
free(h_x);
return 0;
/* Q4 Make the search for the secret key parallel on the GPU using CUDA. */
}
|
0fffe4b80668b912afb78f77e6a9a43556fb0270.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <time.h>
#include "cuda.h"
#include "functions.c"
//compute a*b mod p safely
__device__ unsigned int modprodcu(unsigned int a, unsigned int b, unsigned int p) {
unsigned int za = a;
unsigned int ab = 0;
while (b > 0) {
if (b%2 == 1) ab = (ab + za) % p;
za = (2 * za) % p;
b /= 2;
}
return ab;
}
//compute a^b mod p safely
__device__ unsigned int modExpcu(unsigned int a, unsigned int b, unsigned int p) {
unsigned int z = a;
unsigned int aExpb = 1;
while (b > 0) {
if (b%2 == 1) aExpb = modprodcu(aExpb, z, p);
z = modprodcu(z, z, p);
b /= 2;
}
return aExpb;
}
__global__ void search(unsigned int p, unsigned int g, unsigned int h, unsigned int* x){
unsigned int myX = (unsigned int)(threadIdx.x+blockIdx.x*blockDim.x);
unsigned int myY = (unsigned int)(threadIdx.y+blockIdx.y*blockDim.y);
//find the secret key
unsigned int i = myY*blockDim.x*gridDim.x+myX;
if(i < p) {
if (modExpcu(g,i+1,p)==h)
*x=i+1;
}
}
int main (int argc, char **argv) {
/* Part 2. Start this program by first copying the contents of the main function from
your completed decrypt.c main function. */
//declare storage for an ElGamal cryptosystem
unsigned int n, p, g, h;
unsigned int Nints;
//get the secret key from the user
//printf("Enter the secret key (0 if unknown): "); fflush(stdout);
//char stat = scanf("%u",&x);
unsigned int* h_x = (unsigned int*)malloc(sizeof(unsigned int));
*h_x = 0;
//printf("Reading file.\n");
FILE* f = fopen("bonus_public_key.txt", "r");
fscanf(f, "%u\n%u\n%u\n%u\n", &n, &p, &g, &h);
fclose(f);
f = fopen("bonus_message.txt", "r");
fscanf(f, "%u\n", &Nints);
unsigned int* Zmessage = (unsigned int*) malloc(Nints*sizeof(unsigned int));
unsigned int* a = (unsigned int*) malloc(Nints*sizeof(unsigned int));
for(int i = 0; i < Nints; i++){
fscanf(f, "%u %u\n", &Zmessage[i], &a[i]);
}
fclose(f);
//---------------------------------------------------------------------------------------------------------------
unsigned int* d_x;
cudaMalloc(&d_x, sizeof(unsigned int));
dim3 B(32, 32, 1);
int N = (n-10+1)/2;
if(N < 0)
N = 0;
N = 1 << N;
dim3 G(N,N,1);
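// Launch geometry: 32x32 = 2^10 threads per block on an N x N grid with
// N = 2^((n-9)/2), so roughly 2^(n+1) threads in total -- enough to test
// every candidate key below the n-bit prime p.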
double startTime = clock();
search <<< G,B >>> (p, g, h, d_x);
cudaDeviceSynchronize();
double endTime = clock();
double totalTime = (endTime-startTime)/CLOCKS_PER_SEC;
double work = (double) p;
double throughput = work/totalTime;
printf("Searching all keys took %g seconds, throughput was %g values tested per second.\n", totalTime, throughput);
cudaMemcpy(h_x,d_x,sizeof(unsigned int),cudaMemcpyDeviceToHost);
printf("x=%u\n", *h_x);
cudaFree(d_x);
//--------------------------------------------------------------------------------------------------------------
unsigned int Nchars = Nints*(n-1)/8;
printf("Nchars=%u\n", Nchars);
ElGamalDecrypt(Zmessage, a, Nints, p, *h_x);
unsigned char* message = (unsigned char*) malloc(Nchars*sizeof(unsigned char));
convertZToString(Zmessage, Nints, message, Nchars);
printf("Decrypted message: \"%s\"\n", message);
free(h_x);
return 0;
/* Q4 Make the search for the secret key parallel on the GPU using CUDA. */
}
|
cc945b2f019540f9c4c493e1ec2166cecdb22b54.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <iostream>
#include <math.h>
#include <sstream>
#include <fstream>
#include<string.h>
//#include <bits/stdc++.h>
//#include <stdlib.h>
//#include <time.h>
using namespace std;
/***DEFINING THE DEFINES FOR THE ARRAY INDICES****************************/
//#define N 128
#define C 3
#define H 227
#define W 227
#define R 11
#define S 11
#define M 96
#define E 55
#define F 55
#define U 4
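// Layer dimensions (these match AlexNet's conv1): a CxHxW = 3x227x227 input,
// M = 96 filters of size RxS = 11x11 applied at stride U = 4, producing an
// ExF = 55x55 output plane per filter.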
__global__
void ew_gpu_mmul(float* d_o, float* d_i, float* d_w, int width, int height, int stride, int ip_height, int wt_width, int num_wt,int num_img, int num_ch)
{
int row = threadIdx.y; int col = threadIdx.x;
__shared__ float s_w[C*R*S];
// Stage this block's filter in shared memory: each thread copies a strided
// slice of the C*R*S weights, and the block synchronizes once before use.
int tid = row*blockDim.x + col;
for (int idx = tid; idx < num_ch*wt_width*wt_width; idx += blockDim.x*blockDim.y){
s_w[idx] = d_w[blockIdx.y*num_ch*wt_width*wt_width + idx];
}
__syncthreads();
// Each thread computes a 2x2 tile of output pixels: the block is launched as
// 28x28 threads for a 55x55 output plane, so two passes per axis cover it.
for(int x=0;x<2;x++){
for(int y=0;y<2;y++){
int out_row = row + y*blockDim.y;
int out_col = col + x*blockDim.x;
if(out_row >= height || out_col >= width)
continue; //bounds check before the loads keeps the input reads in range
float prod = 0;
for (int i=0; i<wt_width; i++){
for (int j=0; j<wt_width; j++){
for(int k=0; k<num_ch; k++){
float ip = d_i[blockIdx.x*num_ch*ip_height*ip_height+k*ip_height*ip_height+(stride*out_row+i)*ip_height+(stride*out_col+j)];
prod += ip*s_w[k*wt_width*wt_width+(i*wt_width+j)];
}
}
}
if(prod>=0)
d_o[blockIdx.x*num_wt*height*width+blockIdx.y*width*height+out_row*width+out_col] = prod;
}
}
}
void element_wise_mmul(float* output, float* input, float* weight, int batch_size)
{
int x,y,i,j,m,n,k;
for(n=0; n<batch_size; n++){
for (m=0 ; m<M; m++){
for (x=0; x<F; x++){
for(y=0; y<E; y++){
// OP[x][y] = 0; // adding bias to output
for (i=0; i<R; i++){
for (j=0; j<S; j++){
for(k=0; k<C; k++){
float ip = input[n*C*H*W+k*H*W+(U*x+i)*H+(U*y+j)];
float wt = weight[m*C*R*S+k*R*S+i*S+j];
float prod = ip*wt;
if(prod>=0)
output[n*E*F*M+m*E*F+x*E+y] += prod;
//OP[x][y] += IP[U*x+i][U*y+j]*WT[i][j];
}}
}
}
}
}
}
}
int main(int argc, char* argv[])
{
int batch_size = atoi(argv[1]);
/*************INITIALIZING MATRICES*********************************/
float *IP = (float*) malloc(batch_size*C*H*W*sizeof(float));
//float IP[H][W];
float *OP = (float*) malloc(batch_size*M*F*E*sizeof(float));
//float OP[F][E];
float *OPG = (float*) malloc(batch_size*M*F*E*sizeof(float));
float *WT = (float*) malloc(M*C*R*S*sizeof(float));
//float WT[R][S];
float* d_o;
float* d_i;
float* d_w;
//clock_t cpu_start, gpu_start, cpu_end, gpu_end;
//int a,b,c,d;
int c,d,m,n,k;
/*INITIALIZING WEIGHT MATRIX*/
for (m=0; m<M; m++){
for(k=0;k<C;k++){
for (c=0; c<R; c++){
for(d=0; d<S; d++){
//WT[c][d] = 2.0;
WT[m*C*R*S+k*R*S+c*S+d] = (float)rand()/(RAND_MAX+1.0);
}
}
}
}
/*INITIALIZING OUTPUT MATRIX*/
for (n=0; n<batch_size;n++){
for (m=0; m<M; m++){
for (c=0; c<F; c++){
for(d=0; d<E; d++){
//OP[c][d] = 0;
OP[n*M*F*E+m*F*E+c*E+d] = 0;
}
}
}
}
/*INITIALIZING INPUT MATRIX*/
for (n=0; n<batch_size; n++){
for(k=0;k<C;k++){
for (c=0; c<H; c++){
for(d=0; d<W; d++){
// IP[c][d] = (a+b+c+d);
// IP[n*C*H*W+k*H*W+c*W+d] = (c+1);
//if ((c<=1) || (d<=1) || (c>=29) || (d>=29))
//IP[n*C*H*W+k*H*W+c*W+d] = 0;
//else
IP[n*C*H*W+k*H*W+c*W+d] = (float)rand()/(RAND_MAX+1.0);
}
}
}
}
hipMalloc((void**) &d_i,batch_size*C*H*W*sizeof(float));
hipMemcpy(d_i, IP, batch_size*C*H*W*sizeof(float), hipMemcpyHostToDevice);
hipMalloc((void**) &d_w, M*C*R*S*sizeof(float));
hipMemcpy(d_w, WT, M*C*R*S*sizeof(float), hipMemcpyHostToDevice);
hipMalloc((void**) &d_o, batch_size*M*E*F*sizeof(float));
//clock_t start, end;
//start = clock();
//element_wise_mmul(OP, IP, WT, batch_size);
//end = clock();
//printf("cpu time is %f secs\n", (float)(end-start)/CLOCKS_PER_SEC);
dim3 dimGrid(batch_size,96,1);
dim3 dimBlock(28,28,1);
//gpu_start = clock();
hipLaunchKernelGGL(( ew_gpu_mmul), dim3(dimGrid), dim3(dimBlock), 0, 0, d_o,d_i,d_w,55,55,4,227,11,96,batch_size,3);
//gpu_end = clock();
hipMemcpy(OPG,d_o, batch_size*M*E*F*sizeof(float), hipMemcpyDeviceToHost);
int g,h,s,u;
float max_error = 0;
string filename = "layer_1_"+to_string(batch_size);
ifstream fin(filename.c_str());
string line ;
for (u=0;u<batch_size;u++){
for (s=0;s<M;s++){
for (g=0; g<F; g++){
for(h=0; h<E; h++){
getline(fin,line);
float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-atof(line.c_str()));
// float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h]);
if (error > max_error)
max_error = error;
// printf("the output is %f for index %d, %d,%d,%d.\n",OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
// printf("diff CPU and GPU is %f for index %d,%d,%d,%d.\n", OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
// printf("the output from GPU is %f for index %d,%d,%d,%d.\n",OPG[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
}
}
}
}
fin.close();
printf("max error is %f\n", max_error);
//cout<<"time taken by cpu call is "<<((double)(cpu_end-cpu_start))/CLOCKS_PER_SEC<<"secs"<<endl;
//cout<<"time taken by gpu call is "<<((double)(gpu_end-gpu_start))/CLOCKS_PER_SEC<<"secs"<<endl;
hipFree(d_o);
hipFree(d_i);
hipFree(d_w);
free(OPG);
free(IP);
free(WT);
free(OP);
return 0;
}
|
cc945b2f019540f9c4c493e1ec2166cecdb22b54.cu
|
#include <stdio.h>
#include <iostream>
#include <math.h>
#include <sstream>
#include <fstream>
#include<string.h>
//#include <bits/stdc++.h>
//#include <stdlib.h>
//#include <time.h>
using namespace std;
/***DEFINING THE DEFINES FOR THE ARRAY INDICES****************************/
//#define N 128
#define C 3
#define H 227
#define W 227
#define R 11
#define S 11
#define M 96
#define E 55
#define F 55
#define U 4
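// Layer dimensions (these match AlexNet's conv1): a CxHxW = 3x227x227 input,
// M = 96 filters of size RxS = 11x11 applied at stride U = 4, producing an
// ExF = 55x55 output plane per filter.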
__global__
void ew_gpu_mmul(float* d_o, float* d_i, float* d_w, int width, int height, int stride, int ip_height, int wt_width, int num_wt,int num_img, int num_ch)
{
int row = threadIdx.y; int col = threadIdx.x;
__shared__ float s_w[C*R*S];
// Stage this block's filter in shared memory: each thread copies a strided
// slice of the C*R*S weights, and the block synchronizes once before use.
int tid = row*blockDim.x + col;
for (int idx = tid; idx < num_ch*wt_width*wt_width; idx += blockDim.x*blockDim.y){
s_w[idx] = d_w[blockIdx.y*num_ch*wt_width*wt_width + idx];
}
__syncthreads();
// Each thread computes a 2x2 tile of output pixels: the block is launched as
// 28x28 threads for a 55x55 output plane, so two passes per axis cover it.
for(int x=0;x<2;x++){
for(int y=0;y<2;y++){
int out_row = row + y*blockDim.y;
int out_col = col + x*blockDim.x;
if(out_row >= height || out_col >= width)
continue; //bounds check before the loads keeps the input reads in range
float prod = 0;
for (int i=0; i<wt_width; i++){
for (int j=0; j<wt_width; j++){
for(int k=0; k<num_ch; k++){
float ip = d_i[blockIdx.x*num_ch*ip_height*ip_height+k*ip_height*ip_height+(stride*out_row+i)*ip_height+(stride*out_col+j)];
prod += ip*s_w[k*wt_width*wt_width+(i*wt_width+j)];
}
}
}
if(prod>=0)
d_o[blockIdx.x*num_wt*height*width+blockIdx.y*width*height+out_row*width+out_col] = prod;
}
}
}
void element_wise_mmul(float* output, float* input, float* weight, int batch_size)
{
int x,y,i,j,m,n,k;
for(n=0; n<batch_size; n++){
for (m=0 ; m<M; m++){
for (x=0; x<F; x++){
for(y=0; y<E; y++){
// OP[x][y] = 0; // adding bias to output
for (i=0; i<R; i++){
for (j=0; j<S; j++){
for(k=0; k<C; k++){
float ip = input[n*C*H*W+k*H*W+(U*x+i)*H+(U*y+j)];
float wt = weight[m*C*R*S+k*R*S+i*S+j];
float prod = ip*wt;
if(prod>=0)
output[n*E*F*M+m*E*F+x*E+y] += prod;
//OP[x][y] += IP[U*x+i][U*y+j]*WT[i][j];
}}
}
}
}
}
}
}
int main(int argc, char* argv[])
{
int batch_size = atoi(argv[1]);
/*************INITIALIZING MATRICES*********************************/
float *IP = (float*) malloc(batch_size*C*H*W*sizeof(float));
//float IP[H][W];
float *OP = (float*) malloc(batch_size*M*F*E*sizeof(float));
//float OP[F][E];
float *OPG = (float*) malloc(batch_size*M*F*E*sizeof(float));
float *WT = (float*) malloc(M*C*R*S*sizeof(float));
//float WT[R][S];
float* d_o;
float* d_i;
float* d_w;
//clock_t cpu_start, gpu_start, cpu_end, gpu_end;
//int a,b,c,d;
int c,d,m,n,k;
/*INITIALIZING WEIGHT MATRIX*/
for (m=0; m<M; m++){
for(k=0;k<C;k++){
for (c=0; c<R; c++){
for(d=0; d<S; d++){
//WT[c][d] = 2.0;
WT[m*C*R*S+k*R*S+c*S+d] = (float)rand()/(RAND_MAX+1.0);
}
}
}
}
/*INITIALIZING OUTPUT MATRIX*/
for (n=0; n<batch_size;n++){
for (m=0; m<M; m++){
for (c=0; c<F; c++){
for(d=0; d<E; d++){
//OP[c][d] = 0;
OP[n*M*F*E+m*F*E+c*E+d] = 0;
}
}
}
}
/*INITIALIZING INPUT MATRIX*/
for (n=0; n<batch_size; n++){
for(k=0;k<C;k++){
for (c=0; c<H; c++){
for(d=0; d<W; d++){
// IP[c][d] = (a+b+c+d);
// IP[n*C*H*W+k*H*W+c*W+d] = (c+1);
//if ((c<=1) || (d<=1) || (c>=29) || (d>=29))
//IP[n*C*H*W+k*H*W+c*W+d] = 0;
//else
IP[n*C*H*W+k*H*W+c*W+d] = (float)rand()/(RAND_MAX+1.0);
}
}
}
}
cudaMalloc((void**) &d_i,batch_size*C*H*W*sizeof(float));
cudaMemcpy(d_i, IP, batch_size*C*H*W*sizeof(float), cudaMemcpyHostToDevice);
cudaMalloc((void**) &d_w, M*C*R*S*sizeof(float));
cudaMemcpy(d_w, WT, M*C*R*S*sizeof(float), cudaMemcpyHostToDevice);
cudaMalloc((void**) &d_o, batch_size*M*E*F*sizeof(float));
//clock_t start, end;
//start = clock();
//element_wise_mmul(OP, IP, WT, batch_size);
//end = clock();
//printf("cpu time is %f secs\n", (float)(end-start)/CLOCKS_PER_SEC);
dim3 dimGrid(batch_size,96,1);
dim3 dimBlock(28,28,1);
//gpu_start = clock();
ew_gpu_mmul<<<dimGrid, dimBlock>>>(d_o,d_i,d_w,55,55,4,227,11,96,batch_size,3);
//gpu_end = clock();
cudaMemcpy(OPG,d_o, batch_size*M*E*F*sizeof(float), cudaMemcpyDeviceToHost);
int g,h,s,u;
float max_error = 0;
string filename = "layer_1_"+to_string(batch_size);
ifstream fin(filename.c_str());
string line ;
for (u=0;u<batch_size;u++){
for (s=0;s<M;s++){
for (g=0; g<F; g++){
for(h=0; h<E; h++){
getline(fin,line);
float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-atof(line.c_str()));
// float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h]);
if (error > max_error)
max_error = error;
// printf("the output is %f for index %d, %d,%d,%d.\n",OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
// printf("diff CPU and GPU is %f for index %d,%d,%d,%d.\n", OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
// printf("the output from GPU is %f for index %d,%d,%d,%d.\n",OPG[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
}
}
}
}
fin.close();
printf("max error is %f\n", max_error);
//cout<<"time taken by cpu call is "<<((double)(cpu_end-cpu_start))/CLOCKS_PER_SEC<<"secs"<<endl;
//cout<<"time taken by gpu call is "<<((double)(gpu_end-gpu_start))/CLOCKS_PER_SEC<<"secs"<<endl;
cudaFree(d_o);
cudaFree(d_i);
cudaFree(d_w);
free(OPG);
free(IP);
free(WT);
free(OP);
return 0;
}
|
873052b77786aaf247217295583530f7e87bd246.hip
|
// !!! This is a file automatically generated by hipify!!!
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <pointercast.h>
#include <types/float16.h>
#include <op_boilerplate.h>
#include <loops/summarystatsreduce.h>
#include <helpers/shape.h>
#include <helpers/TAD.h>
#include <dll.h>
#include <Environment.h>
#include <hip/hip_runtime.h>
#include <cuda_launch_config.h>
#include <helpers/DebugHelper.h>
namespace functions {
namespace summarystats {
/**
* The driver interface for summary stats
* @param op the op number
* @param n the length
* @param dx the input
* @param xShapeInfo the shape information for x
* @param extraParams the extra parameters
* @param result the result buffer
* @param resultShapeInfo the shape information for the result
* @param gpuInformation the gpu information such as block dim, grid dim and shared memory
* @param dimension the dimension to execute along long
* @param dimensionLength the length of the dimension
* @param postProcessOrNot whether to post process or not
*/
template <typename T>
_CUDA_D void SummaryStatsReduce<T>::summaryStatsReduceGeneric(const int op, T *dx, Nd4jLong *xShapeInfo, int xRank, T *extraParams, T *result, Nd4jLong *resultShapeInfo, int zRank, int *dimension, int dimensionLength, int postProcessOrNot,bool biasCorrected, int *allocationBuffer, T *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {
__shared__ UnifiedSharedMemory *manager;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
manager = new(shmem) UnifiedSharedMemory((int *) shmem);
manager->init(sizeof(UnifiedSharedMemory), 0, sizeof(functions::summarystats::SummaryStatsReduce<T>), sizeof(shape::TAD), xRank);
}
__syncthreads();
functions::summarystats::SummaryStatsReduce<T>::transform(
op,
dx,
xShapeInfo,
extraParams,
result,
resultShapeInfo,
dimension,
dimensionLength,
biasCorrected,
allocationBuffer,
reductionBuffer,
manager,
tadOnlyShapeInfo,
tadOffsets);
}
_CUDA_G void summaryStatsReduceDouble(int op, double *dx, Nd4jLong *xShapeInfo, int xRank, double *extraParams, double *result, Nd4jLong *resultShapeInfo, int zRank, int *dimension, int dimensionLength, int postProcessOrNot, bool biasCorrected, int *allocationBuffer, double *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {
SummaryStatsReduce<double>::summaryStatsReduceGeneric(
op,
dx,
xShapeInfo, xRank,
extraParams,
result,
resultShapeInfo, zRank,
dimension,
dimensionLength,
postProcessOrNot,biasCorrected, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets);
}
_CUDA_G void summaryStatsReduceFloat(int op, float *dx, Nd4jLong *xShapeInfo, int xRank, float *extraParams, float *result, Nd4jLong *resultShapeInfo, int zRank, int *dimension, int dimensionLength, int postProcessOrNot,bool biasCorrected,int *allocationBuffer, float *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {
SummaryStatsReduce<float>::summaryStatsReduceGeneric(
op,
dx,
xShapeInfo, xRank,
extraParams,
result,
resultShapeInfo, zRank,
dimension,
dimensionLength,
postProcessOrNot,biasCorrected, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets);
}
_CUDA_G void summaryStatsReduceHalf(int op, float16 *dx, Nd4jLong *xShapeInfo, int xRank, float16 *extraParams, float16 *result, Nd4jLong *resultShapeInfo, int zRank, int *dimension, int dimensionLength, int postProcessOrNot,bool biasCorrected,int *allocationBuffer, float16 *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {
SummaryStatsReduce<float16>::summaryStatsReduceGeneric(
op,
dx,
xShapeInfo, xRank,
extraParams,
result,
resultShapeInfo, zRank,
dimension,
dimensionLength,
postProcessOrNot,biasCorrected, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets);
}
/*
template <typename T>
void __global__ SummaryStatsReduce<T>::summaryStatsReduceT(int op, T *dx, int *xShapeInfo, int xRank, T *extraParams, T *result, int *resultShapeInfo, int zRank, int *dimension, int dimensionLength, int postProcessOrNot,bool biasCorrected,int *allocationBuffer, T *reductionBuffer, int *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {
summaryStatsReduceGeneric<T>(
op,
dx,
xShapeInfo, xRank,
extraParams,
result,
resultShapeInfo, zRank,
dimension,
dimensionLength,
postProcessOrNot,biasCorrected, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets);
}
*/
/**
*
* @param sPartialsRef
* @param tid
* @param extraParams
*/
template<typename T>
template<typename OpType>
_CUDA_D void SummaryStatsReduce<T>::aggregatePartials(SummaryStatsData<T> **sPartialsRef, Nd4jLong tid, Nd4jLong numElements, T *extraParams) {
// start the shared memory loop on the next power of 2 less
// than the block size. If block size is not a power of 2,
// accumulate the intermediate sums in the remainder range.
SummaryStatsData<T> *sPartials = *sPartialsRef;
Nd4jLong floorPow2 = blockDim.x;
if (floorPow2 & (floorPow2 - 1)) {
while (floorPow2 & (floorPow2 - 1)) {
floorPow2 &= floorPow2 - 1;
}
if (tid >= floorPow2) {
SummaryStatsData<T> prev = sPartials[tid - floorPow2];
SummaryStatsData<T> curr = sPartials[tid];
sPartials[tid - floorPow2] = update(prev, curr, extraParams);
}
__syncthreads();
}
for (Nd4jLong activeThreads = floorPow2 >> 1; activeThreads; activeThreads >>= 1) {
if (tid < activeThreads && tid + activeThreads < numElements) {
SummaryStatsData<T> curr = sPartials[tid];
SummaryStatsData<T> next = sPartials[tid + activeThreads];
sPartials[tid] = update(curr, next, extraParams);
}
__syncthreads();
}
};
/**
* @param n n is the number of
* elements to loop through
* @param dx the data to operate on
* @param xVectorInfo the meta data for the vector:
* 0 is the offset
* 1 is the increment/stride
* 2 is the real length of the buffer (n and dx.length won't always be the same)
* 3 is the element wise stride for the buffer
* 4 is the number of elements it takes to get to the next row/column/tensor
* @param gpuInformation
* 0 is the block size
* 1 is the grid size
* 2 is the shared memory size
* @param problemDefinition
* 0 is the number of elements per vector
* 1 is the number of vectors
*/
template<typename T>
template<typename OpType>
_CUDA_D void SummaryStatsReduce<T>::transform(T *dx, Nd4jLong *xShapeInfo, T *extraParams, T *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationBuffer, T *reductionBuffer, UnifiedSharedMemory *manager, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {
/**
* Gpu information for the problem
*/
int tid = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ volatile int resultScalar;
__shared__ int xElementWiseStride;
int numElements = blockDim.x;
//shared memory space for storing intermediate results
SummaryStatsData<T> *sPartials;
//functions::summarystats::SharedSummaryStatsData<T> holder;
sPartials = (SummaryStatsData<T> *) manager->getSharedReductionBuffer(); //holder.getPointer();
T startingVal = startingValue(dx);
SummaryStatsData<T> val;
val.initWithValue(startingVal);
val.n = 0;
sPartials[threadIdx.x] = val;
//length for the tad
__shared__ volatile int xLength;
__shared__ volatile int resultLength;
SummaryStatsData <T> reduction;
reduction.initWithValue(0.0);
reduction.n = 0;
if (threadIdx.x == 0) {
if (resultShapeInfo != nullptr)
resultLength = shape::length(resultShapeInfo);
else resultLength = 1;
if (dimensionLength == 1) {
if (dimension == nullptr || dimension[0] == MAX_DIMENSION)
resultScalar = 1;
else
resultScalar = 0;
}
else
resultScalar = 0;
if (resultLength == 1)
resultScalar = 1;
auto xStride = shape::stride(xShapeInfo);
auto xOrder = shape::order(xShapeInfo);
if (dimension != nullptr && (dimension[0] != MAX_DIMENSION && dimensionLength == 1)) {
xElementWiseStride = xStride[dimension[0]];
}
else {
xElementWiseStride = shape::elementWiseStride(xShapeInfo);
}
xLength = shape::length(xShapeInfo);
}
__syncthreads();
if (!resultScalar) {
__shared__ int tadLength;
__shared__ int tadEWS;
__shared__ int tadRank;
__shared__ int numTads;
__shared__ Nd4jLong *tadShape;
__shared__ Nd4jLong *tadStride;
if (threadIdx.x == 0) {
tadLength = shape::tadLength(xShapeInfo, dimension, dimensionLength);
tadEWS = shape::elementWiseStride(tadOnlyShapeInfo);
tadRank = shape::rank(tadOnlyShapeInfo);
numTads = shape::length(xShapeInfo) / tadLength;
tadShape = shape::shapeOf(tadOnlyShapeInfo);
tadStride = shape::stride(tadOnlyShapeInfo);
}
__syncthreads();
if (dimensionLength > 1) {
Nd4jLong xCoord[MAX_RANK];
for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
auto tadOffsetForBlock = tadOffsets[r];
val.initWithValue(startingVal);
val.n = 0;
sPartials[threadIdx.x] = val;
for (int i = threadIdx.x; i < tadLength; i += blockDim.x) {
shape::ind2subC(tadRank, tadShape, i, tadLength, xCoord);
Nd4jLong xOffset = shape::getOffset(tadOffsetForBlock, tadShape, tadStride, xCoord, tadRank);
SummaryStatsData <T> indexVal2;
indexVal2.initWithValue(dx[xOffset]);
sPartials[threadIdx.x] = update(sPartials[threadIdx.x], OpType::op(indexVal2, extraParams), extraParams);
}
__syncthreads();
aggregatePartials<OpType>(&sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength), extraParams);
__syncthreads();
if (threadIdx.x == 0) {
result[r] = OpType::getValue(postProcessOrNot, sPartials[threadIdx.x]);
}
}
}
else {
for (int i = blockIdx.x; i < numTads; i += gridDim.x) {
auto tadOffsetForBlock = tadOffsets[i];
val.initWithValue(startingVal);
val.n = 0;
sPartials[threadIdx.x] = val;
auto indexX = tadOffsetForBlock + (xElementWiseStride * threadIdx.x);
if (threadIdx.x < tadLength) {
SummaryStatsData <T> indexVal;
indexVal.initWithValue(dx[indexX]);
sPartials[threadIdx.x] = OpType::op(indexVal, extraParams);
}
for (int x = threadIdx.x + blockDim.x; x < tadLength; x += blockDim.x) {
indexX = tadOffsetForBlock + x * tadEWS;
SummaryStatsData <T> indexVal2;
indexVal2.initWithValue(dx[indexX]);
sPartials[threadIdx.x] = update(sPartials[threadIdx.x], OpType::op(indexVal2, extraParams), extraParams);
}
__syncthreads();
aggregatePartials<OpType>(&sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength), extraParams);
__syncthreads();
if (threadIdx.x == 0) {
result[i] = OpType::getValue(postProcessOrNot, sPartials[threadIdx.x]); //postProcess(sPartials[0],tadLength ,extraParams);
}
}
}
}
else if (resultScalar) {
__shared__ int n;
if (threadIdx.x == 0) {
xElementWiseStride = shape::elementWiseStride(xShapeInfo);
n = shape::length(xShapeInfo);
}
__syncthreads();
if (xElementWiseStride >= 1) {
for (Nd4jLong i = tid; i < n; i += (blockDim.x * gridDim.x)) {
SummaryStatsData <T> indexVal2;
indexVal2.initWithValue(dx[i * xElementWiseStride]);
reduction = update(reduction, indexVal2, extraParams);
}
}
else {
__shared__ int rank;
__shared__ Nd4jLong *xShape;
__shared__ Nd4jLong *xStride;
if (threadIdx.x == 0) {
rank = shape::rank(xShapeInfo);
xShape = shape::shapeOf(xShapeInfo);
xStride = shape::stride(xShapeInfo);
}
__syncthreads();
Nd4jLong ind2sub[MAX_RANK];
for (Nd4jLong i = tid; i < n; i += blockDim.x * gridDim.x) {
shape::ind2sub(rank, shape::shapeOf(xShapeInfo), i, n, ind2sub);
auto offset = shape::getOffset(0, xShape, xStride, ind2sub, rank);
SummaryStatsData <T> indexVal2;
indexVal2.initWithValue(dx[offset]);
reduction = update(reduction, indexVal2, extraParams);
}
}
sPartials[threadIdx.x] = reduction;
__syncthreads();
aggregatePartials<OpType>(&sPartials, threadIdx.x, blockDim.x, extraParams);
__syncthreads();
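// Grid-wide reduction ("last block" pattern): each block publishes its
// partial to reductionBuffer, then takes a ticket from the atomic counter at
// tc[16384]; the block that draws the final ticket combines all partials and
// writes the finished statistic to result[0].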
if (gridDim.x > 1) {
__shared__ bool amLast;
unsigned int *tc = (unsigned int *)reductionBuffer;
int rank = shape::rank(xShapeInfo);
tid = threadIdx.x;
if (threadIdx.x == 0) {
SummaryStatsData<T> *pBuffer = (SummaryStatsData<T> *) reductionBuffer;
pBuffer[blockIdx.x] = sPartials[0];
}
__syncthreads();
__threadfence();
if (tid == 0) {
unsigned int ticket = atomicInc(&tc[16384], gridDim.x);
amLast = (ticket == gridDim.x - 1);
}
__syncthreads();
if (amLast) {
tc[16384] = 0;
SummaryStatsData<T> *pBuffer = (SummaryStatsData<T> *) reductionBuffer;
T startingVal = startingValue(dx);
SummaryStatsData<T> val;
val.initWithValue(startingVal);
val.n = 0;
sPartials[threadIdx.x] = val;
for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x) {
sPartials[threadIdx.x] = update(sPartials[threadIdx.x], pBuffer[i], extraParams);
}
__syncthreads();
aggregatePartials<OpType>(&sPartials, threadIdx.x, gridDim.x, extraParams);
__syncthreads();
if (tid == 0) {
result[0] = OpType::getValue(postProcessOrNot, sPartials[0]);
}
}
}
else {
if (tid == 0) {
unsigned int *tc = (unsigned *)reductionBuffer;
tc[16384] = 0;
result[0] = OpType::getValue(postProcessOrNot, sPartials[0]);
}
}
}
};
template <typename T>
_CUDA_D void SummaryStatsReduce<T>::transform(const int opNum, T *dx, Nd4jLong *xShapeInfo, T *extraParams, T *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationBuffer, T *reductionBuffer, UnifiedSharedMemory *manager, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {
DISPATCH_BY_OPNUM(transform, PARAMS(dx, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, postProcessOrNot, allocationBuffer, reductionBuffer, manager, tadOnlyShapeInfo, tadOffsets), SUMMARY_STATS_OPS);
};
template <>
_CUDA_H double SummaryStatsReduce<double>::execSummaryStatsReduceScalar(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, double *x, Nd4jLong *xShapeInfo, double *extraParams, bool biasCorrected) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D16 opNum:[%i]\n", opNum);
double *resultPointer = reinterpret_cast<double *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
hipLaunchKernelGGL(( functions::summarystats::summaryStatsReduceDouble), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
x,
xShapeInfo, shape::rank(hostXShapeInfo),
extraParams,
resultPointer,
nullptr, 0,
nullptr,
1,
1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
// this is a blocking method since it must return a scalar
nd4j::DebugHelper::checkErrorCode(stream, "execSSReduceScalarDouble(...) failed");
double result = resultPointer[0];
return result;
}
template <>
_CUDA_H float SummaryStatsReduce<float>::execSummaryStatsReduceScalar(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, float *x, Nd4jLong *xShapeInfo, float *extraParams, bool biasCorrected) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F16 opNum:[%i]\n", opNum);
float *resultPointer = reinterpret_cast<float *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
hipLaunchKernelGGL(( functions::summarystats::summaryStatsReduceFloat), dim3(launchDims.x),dim3(launchDims.y),launchDims.z * 2, *stream,
opNum,
x,
xShapeInfo, shape::rank(hostXShapeInfo),
extraParams,
resultPointer,
nullptr, 0,
nullptr,
1,
1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
// this is a blocking method, since it has to return a scalar
nd4j::DebugHelper::checkErrorCode(stream, "execSSReduceScalarFloat(...) failed");
float result = resultPointer[0];
return result;
}
template <>
_CUDA_H float16 SummaryStatsReduce<float16>::execSummaryStatsReduceScalar(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, float16 *x, Nd4jLong *xShapeInfo, float16 *extraParams, bool biasCorrected) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H16 opNum:[%i]\n", opNum);
float16 *resultPointer = reinterpret_cast<float16 *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
hipLaunchKernelGGL(( functions::summarystats::summaryStatsReduceHalf), dim3(launchDims.x),dim3(launchDims.y),launchDims.z * 4, *stream,
opNum,
x,
xShapeInfo, shape::rank(hostXShapeInfo),
extraParams,
resultPointer,
nullptr, 0,
nullptr,
1,
1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
// this is a blocking method, since it has to return a scalar
nd4j::DebugHelper::checkErrorCode(stream, "execSSReduceScalarHalf(...) failed");
float16 result = resultPointer[0];
return result;
}
template <>
_CUDA_H void SummaryStatsReduce<float>::execSummaryStatsReduce(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, float *x, Nd4jLong *xShapeInfo, float *extraParams, float *result, Nd4jLong *resultShapeInfo,bool biasCorrected) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F17 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF17 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( functions::summarystats::summaryStatsReduceFloat), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
x,
xShapeInfo, shape::rank(hostXShapeInfo),
extraParams,
result,
resultShapeInfo, shape::rank(hostZShapeInfo),
nullptr,
1,
1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
DEBUG_KERNEL(stream, opNum);
}
template <>
_CUDA_H void SummaryStatsReduce<float16>::execSummaryStatsReduce(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, float16 *x, Nd4jLong *xShapeInfo, float16 *extraParams, float16 *result, Nd4jLong *resultShapeInfo,bool biasCorrected) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H17 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AH17 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( functions::summarystats::summaryStatsReduceHalf), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
x,
xShapeInfo, shape::rank(hostXShapeInfo),
extraParams,
result,
resultShapeInfo, shape::rank(hostZShapeInfo),
nullptr,
1,
1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
DEBUG_KERNEL(stream, opNum);
}
template <>
_CUDA_H void SummaryStatsReduce<double>::execSummaryStatsReduce(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, double *x, Nd4jLong *xShapeInfo, double *extraParams, double *result, Nd4jLong *resultShapeInfo,bool biasCorrected) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D17 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AD17 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( functions::summarystats::summaryStatsReduceDouble), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
x,
xShapeInfo, shape::rank(hostXShapeInfo),
extraParams,
result,
resultShapeInfo, shape::rank(hostZShapeInfo),
nullptr,
1,
1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
DEBUG_KERNEL(stream, opNum);
}
template <>
_CUDA_H void SummaryStatsReduce<double>::execSummaryStatsReduce(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, double *x, Nd4jLong *xShapeInfo, double *extraParams, double *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength,bool biasCorrected) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D18 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
hipLaunchKernelGGL(( functions::summarystats::summaryStatsReduceDouble), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
x,
xShapeInfo, shape::rank(hostXShapeInfo),
extraParams,
result,
resultShapeInfo, shape::rank(hostZShapeInfo),
dimension,
dimensionLength,
1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
DEBUG_KERNEL(stream, opNum);
}
template <>
_CUDA_H void SummaryStatsReduce<float>::execSummaryStatsReduce(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, float *x, Nd4jLong *xShapeInfo, float *extraParams, float *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength,bool biasCorrected) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F18 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
// we need a shmem buffer big enough to hold double-precision values
launchDims.z *= 2;
hipLaunchKernelGGL(( functions::summarystats::summaryStatsReduceFloat), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
x,
xShapeInfo, shape::rank(hostXShapeInfo),
extraParams,
result,
resultShapeInfo, shape::rank(hostZShapeInfo),
dimension,
dimensionLength,
1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
DEBUG_KERNEL(stream, opNum);
}
template <>
_CUDA_H void SummaryStatsReduce<float16>::execSummaryStatsReduce(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, float16 *x, Nd4jLong *xShapeInfo, float16 *extraParams, float16 *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength,bool biasCorrected) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H18 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
// we need a shmem buffer big enough to hold double-precision values
launchDims.z *= 4;
hipLaunchKernelGGL(( functions::summarystats::summaryStatsReduceHalf), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
x,
xShapeInfo, shape::rank(hostXShapeInfo),
extraParams,
result,
resultShapeInfo, shape::rank(hostZShapeInfo),
dimension,
dimensionLength,
1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
DEBUG_KERNEL(stream, opNum);
}
template class ND4J_EXPORT SummaryStatsReduce<float>;
template class ND4J_EXPORT SummaryStatsReduce<float16>;
template class ND4J_EXPORT SummaryStatsReduce<double>;
}
}
|
873052b77786aaf247217295583530f7e87bd246.cu
|
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <pointercast.h>
#include <types/float16.h>
#include <op_boilerplate.h>
#include <loops/summarystatsreduce.h>
#include <helpers/shape.h>
#include <helpers/TAD.h>
#include <dll.h>
#include <Environment.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_launch_config.h>
#include <helpers/DebugHelper.h>
namespace functions {
namespace summarystats {
/**
* The driver interface for summary stats
* @param op the op number
* @param n the length
* @param dx the input
* @param xShapeInfo the shape information for x
* @param extraParams the extra parameters
* @param result the result buffer
* @param resultShapeInfo the shape information for the result
* @param gpuInformation the gpu information such as block dim, grid dim and shared memory
* @param dimension the dimension to execute along
* @param dimensionLength the length of the dimension
* @param postProcessOrNot whether to post process or not
*/
template <typename T>
_CUDA_D void SummaryStatsReduce<T>::summaryStatsReduceGeneric(const int op, T *dx, Nd4jLong *xShapeInfo, int xRank, T *extraParams, T *result, Nd4jLong *resultShapeInfo, int zRank, int *dimension, int dimensionLength, int postProcessOrNot,bool biasCorrected, int *allocationBuffer, T *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {
__shared__ UnifiedSharedMemory *manager;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
manager = new(shmem) UnifiedSharedMemory((int *) shmem);
manager->init(sizeof(UnifiedSharedMemory), 0, sizeof(functions::summarystats::SummaryStatsReduce<T>), sizeof(shape::TAD), xRank);
}
__syncthreads();
functions::summarystats::SummaryStatsReduce<T>::transform(
op,
dx,
xShapeInfo,
extraParams,
result,
resultShapeInfo,
dimension,
dimensionLength,
biasCorrected,
allocationBuffer,
reductionBuffer,
manager,
tadOnlyShapeInfo,
tadOffsets);
}
_CUDA_G void summaryStatsReduceDouble(int op, double *dx, Nd4jLong *xShapeInfo, int xRank, double *extraParams, double *result, Nd4jLong *resultShapeInfo, int zRank, int *dimension, int dimensionLength, int postProcessOrNot, bool biasCorrected, int *allocationBuffer, double *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {
SummaryStatsReduce<double>::summaryStatsReduceGeneric(
op,
dx,
xShapeInfo, xRank,
extraParams,
result,
resultShapeInfo, zRank,
dimension,
dimensionLength,
postProcessOrNot,biasCorrected, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets);
}
_CUDA_G void summaryStatsReduceFloat(int op, float *dx, Nd4jLong *xShapeInfo, int xRank, float *extraParams, float *result, Nd4jLong *resultShapeInfo, int zRank, int *dimension, int dimensionLength, int postProcessOrNot,bool biasCorrected,int *allocationBuffer, float *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {
SummaryStatsReduce<float>::summaryStatsReduceGeneric(
op,
dx,
xShapeInfo, xRank,
extraParams,
result,
resultShapeInfo, zRank,
dimension,
dimensionLength,
postProcessOrNot,biasCorrected, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets);
}
_CUDA_G void summaryStatsReduceHalf(int op, float16 *dx, Nd4jLong *xShapeInfo, int xRank, float16 *extraParams, float16 *result, Nd4jLong *resultShapeInfo, int zRank, int *dimension, int dimensionLength, int postProcessOrNot,bool biasCorrected,int *allocationBuffer, float16 *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {
SummaryStatsReduce<float16>::summaryStatsReduceGeneric(
op,
dx,
xShapeInfo, xRank,
extraParams,
result,
resultShapeInfo, zRank,
dimension,
dimensionLength,
postProcessOrNot,biasCorrected, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets);
}
/*
template <typename T>
void __global__ SummaryStatsReduce<T>::summaryStatsReduceT(int op, T *dx, int *xShapeInfo, int xRank, T *extraParams, T *result, int *resultShapeInfo, int zRank, int *dimension, int dimensionLength, int postProcessOrNot,bool biasCorrected,int *allocationBuffer, T *reductionBuffer, int *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {
summaryStatsReduceGeneric<T>(
op,
dx,
xShapeInfo, xRank,
extraParams,
result,
resultShapeInfo, zRank,
dimension,
dimensionLength,
postProcessOrNot,biasCorrected, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets);
}
*/
/**
*
* @param sPartialsRef
* @param tid
* @param extraParams
*/
template<typename T>
template<typename OpType>
_CUDA_D void SummaryStatsReduce<T>::aggregatePartials(SummaryStatsData<T> **sPartialsRef, Nd4jLong tid, Nd4jLong numElements, T *extraParams) {
// start the shared memory loop on the next power of 2 less
// than the block size. If block size is not a power of 2,
// accumulate the intermediate sums in the remainder range.
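// Example (hypothetical size): blockDim.x = 96 gives floorPow2 = 64, so
// threads 64..95 first fold their entries into slots 0..31; the loop below
// then halves activeThreads from 32 down to 1 as a plain binary tree.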
SummaryStatsData<T> *sPartials = *sPartialsRef;
Nd4jLong floorPow2 = blockDim.x;
if (floorPow2 & (floorPow2 - 1)) {
while (floorPow2 & (floorPow2 - 1)) {
floorPow2 &= floorPow2 - 1;
}
if (tid >= floorPow2) {
SummaryStatsData<T> prev = sPartials[tid - floorPow2];
SummaryStatsData<T> curr = sPartials[tid];
sPartials[tid - floorPow2] = update(prev, curr, extraParams);
}
__syncthreads();
}
for (Nd4jLong activeThreads = floorPow2 >> 1; activeThreads; activeThreads >>= 1) {
if (tid < activeThreads && tid + activeThreads < numElements) {
SummaryStatsData<T> curr = sPartials[tid];
SummaryStatsData<T> next = sPartials[tid + activeThreads];
sPartials[tid] = update(curr, next, extraParams);
}
__syncthreads();
}
};
/**
* @param n n is the number of
* elements to loop through
* @param dx the data to operate on
* @param xVectorInfo the meta data for the vector:
* 0 is the offset
* 1 is the increment/stride
* 2 is the real length of the buffer (n and dx.length won't always be the same)
* 3 is the element wise stride for the buffer
* 4 is the number of elements it takes to get to the next row/column/tensor
* @param gpuInformation
* 0 is the block size
* 1 is the grid size
* 2 is the shared memory size
* @param problemDefinition
* 0 is the number of elements per vector
* 1 is the number of vectors
*/
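// Three execution paths follow: a multi-dimensional TAD reduction driven by
// coordinate arithmetic, a one-dimensional TAD reduction driven by the
// element-wise stride, and a whole-array scalar reduction that finishes with
// a grid-wide merge of the per-block partials.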
template<typename T>
template<typename OpType>
_CUDA_D void SummaryStatsReduce<T>::transform(T *dx, Nd4jLong *xShapeInfo, T *extraParams, T *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationBuffer, T *reductionBuffer, UnifiedSharedMemory *manager, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {
/**
* Gpu information for the problem
*/
int tid = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ volatile int resultScalar;
__shared__ int xElementWiseStride;
int numElements = blockDim.x;
//shared memory space for storing intermediate results
SummaryStatsData<T> *sPartials;
//functions::summarystats::SharedSummaryStatsData<T> holder;
sPartials = (SummaryStatsData<T> *) manager->getSharedReductionBuffer(); //holder.getPointer();
T startingVal = startingValue(dx);
SummaryStatsData<T> val;
val.initWithValue(startingVal);
val.n = 0;
sPartials[threadIdx.x] = val;
//length for the tad
__shared__ volatile int xLength;
__shared__ volatile int resultLength;
SummaryStatsData <T> reduction;
reduction.initWithValue(0.0);
reduction.n = 0;
if (threadIdx.x == 0) {
if (resultShapeInfo != nullptr)
resultLength = shape::length(resultShapeInfo);
else resultLength = 1;
if (dimensionLength == 1) {
if (dimension == nullptr || dimension[0] == MAX_DIMENSION)
resultScalar = 1;
else
resultScalar = 0;
}
else
resultScalar = 0;
if (resultLength == 1)
resultScalar = 1;
auto xStride = shape::stride(xShapeInfo);
auto xOrder = shape::order(xShapeInfo);
if (dimension != nullptr && (dimension[0] != MAX_DIMENSION && dimensionLength == 1)) {
xElementWiseStride = xStride[dimension[0]];
}
else {
xElementWiseStride = shape::elementWiseStride(xShapeInfo);
}
xLength = shape::length(xShapeInfo);
}
__syncthreads();
if (!resultScalar) {
__shared__ int tadLength;
__shared__ int tadEWS;
__shared__ int tadRank;
__shared__ int numTads;
__shared__ Nd4jLong *tadShape;
__shared__ Nd4jLong *tadStride;
if (threadIdx.x == 0) {
tadLength = shape::tadLength(xShapeInfo, dimension, dimensionLength);
tadEWS = shape::elementWiseStride(tadOnlyShapeInfo);
tadRank = shape::rank(tadOnlyShapeInfo);
numTads = shape::length(xShapeInfo) / tadLength;
tadShape = shape::shapeOf(tadOnlyShapeInfo);
tadStride = shape::stride(tadOnlyShapeInfo);
}
__syncthreads();
if (dimensionLength > 1) {
Nd4jLong xCoord[MAX_RANK];
for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
auto tadOffsetForBlock = tadOffsets[r];
val.initWithValue(startingVal);
val.n = 0;
sPartials[threadIdx.x] = val;
for (int i = threadIdx.x; i < tadLength; i += blockDim.x) {
shape::ind2subC(tadRank, tadShape, i, tadLength, xCoord);
Nd4jLong xOffset = shape::getOffset(tadOffsetForBlock, tadShape, tadStride, xCoord, tadRank);
SummaryStatsData <T> indexVal2;
indexVal2.initWithValue(dx[xOffset]);
sPartials[threadIdx.x] = update(sPartials[threadIdx.x], OpType::op(indexVal2, extraParams), extraParams);
}
__syncthreads();
aggregatePartials<OpType>(&sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength), extraParams);
__syncthreads();
if (threadIdx.x == 0) {
result[r] = OpType::getValue(postProcessOrNot, sPartials[threadIdx.x]);
}
}
}
else {
for (int i = blockIdx.x; i < numTads; i += gridDim.x) {
auto tadOffsetForBlock = tadOffsets[i];
val.initWithValue(startingVal);
val.n = 0;
sPartials[threadIdx.x] = val;
auto indexX = tadOffsetForBlock + (xElementWiseStride * threadIdx.x);
if (threadIdx.x < tadLength) {
SummaryStatsData <T> indexVal;
indexVal.initWithValue(dx[indexX]);
sPartials[threadIdx.x] = OpType::op(indexVal, extraParams);
}
for (int x = threadIdx.x + blockDim.x; x < tadLength; x += blockDim.x) {
indexX = tadOffsetForBlock + x * tadEWS;
SummaryStatsData <T> indexVal2;
indexVal2.initWithValue(dx[indexX]);
sPartials[threadIdx.x] = update(sPartials[threadIdx.x], OpType::op(indexVal2, extraParams), extraParams);
}
__syncthreads();
aggregatePartials<OpType>(&sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength), extraParams);
__syncthreads();
if (threadIdx.x == 0) {
result[i] = OpType::getValue(postProcessOrNot, sPartials[threadIdx.x]); //postProcess(sPartials[0],tadLength ,extraParams);
}
}
}
}
else if (resultScalar) {
__shared__ int n;
if (threadIdx.x == 0) {
xElementWiseStride = shape::elementWiseStride(xShapeInfo);
n = shape::length(xShapeInfo);
}
__syncthreads();
if (xElementWiseStride >= 1) {
for (Nd4jLong i = tid; i < n; i += (blockDim.x * gridDim.x)) {
SummaryStatsData <T> indexVal2;
indexVal2.initWithValue(dx[i * xElementWiseStride]);
reduction = update(reduction, indexVal2, extraParams);
}
}
else {
__shared__ int rank;
__shared__ Nd4jLong *xShape;
__shared__ Nd4jLong *xStride;
if (threadIdx.x == 0) {
rank = shape::rank(xShapeInfo);
xShape = shape::shapeOf(xShapeInfo);
xStride = shape::stride(xShapeInfo);
}
__syncthreads();
Nd4jLong ind2sub[MAX_RANK];
for (Nd4jLong i = tid; i < n; i += blockDim.x * gridDim.x) {
shape::ind2sub(rank, xShape, i, n, ind2sub);
auto offset = shape::getOffset(0, xShape, xStride, ind2sub, rank);
SummaryStatsData <T> indexVal2;
indexVal2.initWithValue(dx[offset]);
reduction = update(reduction, indexVal2, extraParams);
}
}
sPartials[threadIdx.x] = reduction;
__syncthreads();
aggregatePartials<OpType>(&sPartials, threadIdx.x, blockDim.x, extraParams);
__syncthreads();
if (gridDim.x > 1) {
__shared__ bool amLast;
unsigned int *tc = (unsigned int *)reductionBuffer;
int rank = shape::rank(xShapeInfo);
tid = threadIdx.x;
if (threadIdx.x == 0) {
SummaryStatsData<T> *pBuffer = (SummaryStatsData<T> *) reductionBuffer;
pBuffer[blockIdx.x] = sPartials[0];
}
__syncthreads();
__threadfence();
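// Grid-wide termination ("last block standing"): every block has published
// its partial result above, the fence makes those writes visible, and
// atomicInc hands out tickets from a counter parked at element 16384 of
// reductionBuffer. The block that draws ticket gridDim.x - 1 (amLast)
// reduces all the partials into the final result.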
if (tid == 0) {
unsigned int ticket = atomicInc(&tc[16384], gridDim.x);
amLast = (ticket == gridDim.x - 1);
}
__syncthreads();
if (amLast) {
tc[16384] = 0;
SummaryStatsData<T> *pBuffer = (SummaryStatsData<T> *) reductionBuffer;
T startingVal = startingValue(dx);
SummaryStatsData<T> val;
val.initWithValue(startingVal);
val.n = 0;
sPartials[threadIdx.x] = val;
for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x) {
sPartials[threadIdx.x] = update(sPartials[threadIdx.x], pBuffer[i], extraParams);
}
__syncthreads();
aggregatePartials<OpType>(&sPartials, threadIdx.x, gridDim.x, extraParams);
__syncthreads();
if (tid == 0) {
result[0] = OpType::getValue(postProcessOrNot, sPartials[0]);
}
}
}
else {
if (tid == 0) {
unsigned int *tc = (unsigned int *)reductionBuffer;
tc[16384] = 0;
result[0] = OpType::getValue(postProcessOrNot, sPartials[0]);
}
}
}
};
template <typename T>
_CUDA_D void SummaryStatsReduce<T>::transform(const int opNum, T *dx, Nd4jLong *xShapeInfo, T *extraParams, T *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationBuffer, T *reductionBuffer, UnifiedSharedMemory *manager, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {
DISPATCH_BY_OPNUM(transform, PARAMS(dx, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, postProcessOrNot, allocationBuffer, reductionBuffer, manager, tadOnlyShapeInfo, tadOffsets), SUMMARY_STATS_OPS);
};
template <>
_CUDA_H double SummaryStatsReduce<double>::execSummaryStatsReduceScalar(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, double *x, Nd4jLong *xShapeInfo, double *extraParams, bool biasCorrected) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D16 opNum:[%i]\n", opNum);
double *resultPointer = reinterpret_cast<double *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
functions::summarystats::summaryStatsReduceDouble<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
x,
xShapeInfo, shape::rank(hostXShapeInfo),
extraParams,
resultPointer,
nullptr, 0,
nullptr,
1,
1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
// this is a blocking method, since it has to return a scalar
nd4j::DebugHelper::checkErrorCode(stream, "execSSReduceScalarDouble(...) failed");
double result = resultPointer[0];
return result;
}
template <>
_CUDA_H float SummaryStatsReduce<float>::execSummaryStatsReduceScalar(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, float *x, Nd4jLong *xShapeInfo, float *extraParams, bool biasCorrected) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F16 opNum:[%i]\n", opNum);
float *resultPointer = reinterpret_cast<float *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
functions::summarystats::summaryStatsReduceFloat<<<launchDims.x,launchDims.y,launchDims.z * 2, *stream>>>(
opNum,
x,
xShapeInfo, shape::rank(hostXShapeInfo),
extraParams,
resultPointer,
nullptr, 0,
nullptr,
1,
1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
// this is a blocking method, since it has to return a scalar
nd4j::DebugHelper::checkErrorCode(stream, "execSSReduceScalarFloat(...) failed");
float result = resultPointer[0];
return result;
}
template <>
_CUDA_H float16 SummaryStatsReduce<float16>::execSummaryStatsReduceScalar(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, float16 *x, Nd4jLong *xShapeInfo, float16 *extraParams, bool biasCorrected) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H16 opNum:[%i]\n", opNum);
float16 *resultPointer = reinterpret_cast<float16 *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
functions::summarystats::summaryStatsReduceHalf<<<launchDims.x,launchDims.y,launchDims.z * 4, *stream>>>(
opNum,
x,
xShapeInfo, shape::rank(hostXShapeInfo),
extraParams,
resultPointer,
nullptr, 0,
nullptr,
1,
1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
// this is a blocking method, since it has to return a scalar
nd4j::DebugHelper::checkErrorCode(stream, "execSSReduceScalarHalf(...) failed");
float16 result = resultPointer[0];
return result;
}
template <>
_CUDA_H void SummaryStatsReduce<float>::execSummaryStatsReduce(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, float *x, Nd4jLong *xShapeInfo, float *extraParams, float *result, Nd4jLong *resultShapeInfo,bool biasCorrected) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F17 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF17 opNum:[%i]\n", opNum);
functions::summarystats::summaryStatsReduceFloat<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
x,
xShapeInfo, shape::rank(hostXShapeInfo),
extraParams,
result,
resultShapeInfo, shape::rank(hostZShapeInfo),
nullptr,
1,
1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
DEBUG_KERNEL(stream, opNum);
}
template <>
_CUDA_H void SummaryStatsReduce<float16>::execSummaryStatsReduce(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, float16 *x, Nd4jLong *xShapeInfo, float16 *extraParams, float16 *result, Nd4jLong *resultShapeInfo,bool biasCorrected) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H17 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AH17 opNum:[%i]\n", opNum);
functions::summarystats::summaryStatsReduceHalf<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
x,
xShapeInfo, shape::rank(hostXShapeInfo),
extraParams,
result,
resultShapeInfo, shape::rank(hostZShapeInfo),
nullptr,
1,
1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
DEBUG_KERNEL(stream, opNum);
}
template <>
_CUDA_H void SummaryStatsReduce<double>::execSummaryStatsReduce(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, double *x, Nd4jLong *xShapeInfo, double *extraParams, double *result, Nd4jLong *resultShapeInfo,bool biasCorrected) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D17 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AD17 opNum:[%i]\n", opNum);
functions::summarystats::summaryStatsReduceDouble<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
x,
xShapeInfo, shape::rank(hostXShapeInfo),
extraParams,
result,
resultShapeInfo, shape::rank(hostZShapeInfo),
nullptr,
1,
1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
DEBUG_KERNEL(stream, opNum);
}
template <>
_CUDA_H void SummaryStatsReduce<double>::execSummaryStatsReduce(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, double *x, Nd4jLong *xShapeInfo, double *extraParams, double *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength,bool biasCorrected) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D18 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
functions::summarystats::summaryStatsReduceDouble<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
x,
xShapeInfo, shape::rank(hostXShapeInfo),
extraParams,
result,
resultShapeInfo, shape::rank(hostZShapeInfo),
dimension,
dimensionLength,
1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
DEBUG_KERNEL(stream, opNum);
}
template <>
_CUDA_H void SummaryStatsReduce<float>::execSummaryStatsReduce(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, float *x, Nd4jLong *xShapeInfo, float *extraParams, float *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength,bool biasCorrected) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F18 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
// we need a shmem buffer big enough to hold double-precision values
launchDims.z *= 2;
functions::summarystats::summaryStatsReduceFloat<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
x,
xShapeInfo, shape::rank(hostXShapeInfo),
extraParams,
result,
resultShapeInfo, shape::rank(hostZShapeInfo),
dimension,
dimensionLength,
1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
DEBUG_KERNEL(stream, opNum);
}
template <>
_CUDA_H void SummaryStatsReduce<float16>::execSummaryStatsReduce(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, float16 *x, Nd4jLong *xShapeInfo, float16 *extraParams, float16 *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength,bool biasCorrected) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H18 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
// we need a shmem buffer big enough to hold double-precision values
launchDims.z *= 4;
functions::summarystats::summaryStatsReduceHalf<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
x,
xShapeInfo, shape::rank(hostXShapeInfo),
extraParams,
result,
resultShapeInfo, shape::rank(hostZShapeInfo),
dimension,
dimensionLength,
1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
DEBUG_KERNEL(stream, opNum);
}
template class ND4J_EXPORT SummaryStatsReduce<float>;
template class ND4J_EXPORT SummaryStatsReduce<float16>;
template class ND4J_EXPORT SummaryStatsReduce<double>;
}
}
|
00962608c7d81a39e00b2e066607689c27d3e1a7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <hipblas.h>
double EVALUATE_ERROR(int, int, double*);
void FPRINTF(FILE*, int N, double, double*);
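// Sets up the manufactured problem on the unit square: the analytic solution
// is u(x,y) = x(1-x)y(1-y)exp(x-y) with homogeneous Dirichlet boundaries, and
// rho holds the matching source term, pre-multiplied by dx^2 so it matches
// the unscaled 5-point stencil.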
__global__ void INITIALIZE(int N, double dx, double* rho, double* field, double* field_analytic)
{
int idx_x = threadIdx.x + blockIdx.x*blockDim.x;
int idx_y = threadIdx.y + blockIdx.y*blockDim.y;
if (idx_x<N&&idx_y<N)
{
int idx = idx_x + idx_y*N;
double x = idx_x*dx;
double y = idx_y*dx;
field_analytic[idx] = x*(1.-x)*y*(1.-y)*exp(x-y);
if (idx_x!=0&&idx_x!=N-1&&idx_y!=0&&idx_y!=N-1)
{
field[idx] = 0.0;
rho[idx] = (2.*x*(y-1)*(y-2.*x+x*y+2)*exp(x-y))*dx*dx; // Note that rho has been multiplied by dx^2!
}
else
{
field[idx] = field_analytic[idx];
rho[idx] = 0.0;
}
}
}
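// Per-block residual norm: each interior thread squares its local residual of
// the discretized equation, a shared-memory tree reduction sums the block, and
// thread (0,0) writes the block total for the host-side sum in EVALUATE_ERROR.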
__global__ void EVALUATE_ERROR_BLOCK(int N, double* rho, double* field, double* error_block)
{
extern __shared__ double sm[];
int idx_x = threadIdx.x + blockIdx.x*blockDim.x;
int idx_y = threadIdx.y + blockIdx.y*blockDim.y;
int idx_sm = threadIdx.x + blockDim.x*threadIdx.y;
sm[idx_sm] = 0.0;
__syncthreads();
if (idx_x<N&&idx_y<N)
{
int idx = idx_x + N*idx_y;
if (idx_x!=0&&idx_x!=N-1&&idx_y!=0&&idx_y!=N-1)
{
int L = idx_x-1 + idx_y*N;
int R = idx_x+1 + idx_y*N;
int U = idx_x + (idx_y+1)*N;
int D = idx_x + (idx_y-1)*N;
sm[idx_sm] = pow((field[L]+field[R]+field[U]+field[D]-4.*field[idx])-rho[idx], 2.);
}
__syncthreads();
for (int shift=blockDim.x*blockDim.y/2; shift>0; shift/=2)
{
if (idx_sm<shift)
sm[idx_sm] += sm[idx_sm+shift];
__syncthreads();
}
if (idx_sm==0)
error_block[blockIdx.x+blockIdx.y*gridDim.x] = sm[0];
}
}
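// Screened 5-point Laplacian: in the interior
// A_p = factor * (p_L + p_R + p_U + p_D - (4 + (photon_mass*dx)^2) * p),
// while boundary rows are simply passed through scaled by factor.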
__global__ void LAPLACIAN_DOUBLE(int N, double dx, double photon_mass, double factor, double* p, double* A_p)
{
int idx_x = threadIdx.x + blockDim.x*blockIdx.x;
int idx_y = threadIdx.y + blockDim.y*blockIdx.y;
if (idx_x<N&&idx_y<N)
{
int idx = idx_x + N*idx_y;
if (idx_x!=0&&idx_x!=N-1&&idx_y!=0&&idx_y!=N-1)
{
int L = idx_x-1 + idx_y*N;
int R = idx_x+1 + idx_y*N;
int U = idx_x + (idx_y+1)*N;
int D = idx_x + (idx_y-1)*N;
A_p[idx] = factor*(p[L]+p[R]+p[U]+p[D]-(4.+pow(photon_mass*dx,2.))*p[idx]);
}
else
A_p[idx] = factor*p[idx];
}
}
__global__ void LAPLACIAN_FLOAT(int N, float dx, float photon_mass, float factor, float* p, float* A_p)
{
int idx_x = threadIdx.x + blockDim.x*blockIdx.x;
int idx_y = threadIdx.y + blockDim.y*blockIdx.y;
if (idx_x<N&&idx_y<N)
{
int idx = idx_x + N*idx_y;
if (idx_x!=0&&idx_x!=N-1&&idx_y!=0&&idx_y!=N-1)
{
int L = idx_x-1 + idx_y*N;
int R = idx_x+1 + idx_y*N;
int U = idx_x + (idx_y+1)*N;
int D = idx_x + (idx_y-1)*N;
A_p[idx] = factor*(p[L]+p[R]+p[U]+p[D]-(4.+powf(photon_mass*dx,2.))*p[idx]);
}
else
A_p[idx] = factor*p[idx];
}
}
__global__ void DAXPY_DOUBLE(int N, double c, double *A, double *B)
{
int idx_x = threadIdx.x + blockDim.x*blockIdx.x;
int idx_y = threadIdx.y + blockDim.y*blockIdx.y;
if (idx_x<N&&idx_y<N)
{
int idx = idx_x + N*idx_y;
A[idx] = c*A[idx] + B[idx];
}
}
__global__ void DAXPY_FLOAT(int N, float c, float *A, float *B)
{
int idx_x = threadIdx.x + blockDim.x*blockIdx.x;
int idx_y = threadIdx.y + blockDim.y*blockIdx.y;
if (idx_x<N&&idx_y<N)
{
int idx = idx_x + N*idx_y;
A[idx] = c*A[idx] + B[idx];
}
}
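// Bilinear prolongation (coarse -> fine), promoting float to double on the
// finest level: fine nodes coincident with coarse nodes copy the value, edge
// midpoints average two coarse neighbours, and cell centres average all four.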
__global__ void INTERPOLATE_2D_LAST(int dimension, float* field_coarse, double* field_fine)
{
int N_fine = dimension;
int idx_x_fine = threadIdx.x + blockDim.x*blockIdx.x;
int idx_y_fine = threadIdx.y + blockDim.y*blockIdx.y;
if (idx_x_fine<N_fine&&idx_y_fine<N_fine)
{
int idx_fine = idx_x_fine + N_fine*idx_y_fine;
int N_coarse = (N_fine-1)/2 + 1;
int idx_x_coarse = idx_x_fine/2;
int idx_y_coarse = idx_y_fine/2;
int idx_coarse = idx_x_coarse + N_coarse*idx_y_coarse;
if (idx_x_fine%2==0&&idx_y_fine%2==0)
field_fine[idx_fine] = (double)(field_coarse[idx_coarse]);
else if (idx_x_fine%2==1&&idx_y_fine%2==0)
field_fine[idx_fine] = (double)(0.5*(field_coarse[idx_coarse]+field_coarse[idx_coarse+1]));
else if (idx_x_fine%2==0&&idx_y_fine%2==1)
field_fine[idx_fine] = (double)(0.5*(field_coarse[idx_coarse]+field_coarse[idx_coarse+N_coarse]));
else
field_fine[idx_fine] = (double)(0.25*(field_coarse[idx_coarse]+field_coarse[idx_coarse+1]+field_coarse[idx_coarse+N_coarse]+field_coarse[idx_coarse+N_coarse+1]));
}
}
__global__ void INTERPOLATE_2D(int dimension, float* field_coarse, float* field_fine)
{
int N_fine = dimension;
int idx_x_fine = threadIdx.x + blockDim.x*blockIdx.x;
int idx_y_fine = threadIdx.y + blockDim.y*blockIdx.y;
if (idx_x_fine<N_fine&&idx_y_fine<N_fine)
{
int idx_fine = idx_x_fine + N_fine*idx_y_fine;
int N_coarse = (N_fine-1)/2 + 1;
int idx_x_coarse = idx_x_fine/2;
int idx_y_coarse = idx_y_fine/2;
int idx_coarse = idx_x_coarse + N_coarse*idx_y_coarse;
if (idx_x_fine%2==0&&idx_y_fine%2==0)
field_fine[idx_fine] = field_coarse[idx_coarse];
else if (idx_x_fine%2==1&&idx_y_fine%2==0)
field_fine[idx_fine] = 0.5*(field_coarse[idx_coarse]+field_coarse[idx_coarse+1]);
else if (idx_x_fine%2==0&&idx_y_fine%2==1)
field_fine[idx_fine] = 0.5*(field_coarse[idx_coarse]+field_coarse[idx_coarse+N_coarse]);
else
field_fine[idx_fine] = 0.25*(field_coarse[idx_coarse]+field_coarse[idx_coarse+1]+field_coarse[idx_coarse+N_coarse]+field_coarse[idx_coarse+N_coarse+1]);
}
}
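// Full-weighting restriction (fine -> coarse): interior coarse nodes take 1/4
// of the coincident fine node, 1/8 of its four edge neighbours and 1/16 of its
// four diagonal neighbours; boundary nodes fall back to straight injection.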
__global__ void RESTRICT_2D_FIRST(int dimension, double* field_fine, float* field_coarse)
{
int N_coarse = dimension;
int idx_x_coarse = threadIdx.x + blockDim.x*blockIdx.x;
int idx_y_coarse = threadIdx.y + blockDim.y*blockIdx.y;
if (idx_x_coarse<N_coarse&&idx_y_coarse<N_coarse)
{
int idx_coarse = idx_x_coarse + N_coarse*idx_y_coarse;
int N_fine = (N_coarse-1)*2 + 1;
int idx_x_fine = idx_x_coarse*2;
int idx_y_fine = idx_y_coarse*2;
int idx_fine = idx_x_fine + idx_y_fine*N_fine;
if (idx_x_coarse!=0&&idx_x_coarse!=N_coarse-1&&idx_y_coarse!=0&&idx_y_coarse!=N_coarse-1)
field_coarse[idx_coarse] = (float)(1./16.*(field_fine[idx_fine-N_fine-1]+field_fine[idx_fine-N_fine+1]+field_fine[idx_fine+N_fine-1]+field_fine[idx_fine+N_fine+1]) + 1./8.*(field_fine[idx_fine-N_fine]+field_fine[idx_fine-1]+field_fine[idx_fine+1]+field_fine[idx_fine+N_fine]) + 1./4.*field_fine[idx_fine]);
else
field_coarse[idx_coarse] = (float)(field_fine[idx_fine]);
}
}
__global__ void RESTRICT_2D(int dimension, float* field_fine, float* field_coarse)
{
int N_coarse = dimension;
int idx_x_coarse = threadIdx.x + blockDim.x*blockIdx.x;
int idx_y_coarse = threadIdx.y + blockDim.y*blockIdx.y;
if (idx_x_coarse<N_coarse&&idx_y_coarse<N_coarse)
{
int idx_coarse = idx_x_coarse + N_coarse*idx_y_coarse;
int N_fine = (N_coarse-1)*2 + 1;
int idx_x_fine = idx_x_coarse*2;
int idx_y_fine = idx_y_coarse*2;
int idx_fine = idx_x_fine + idx_y_fine*N_fine;
if (idx_x_coarse!=0&&idx_x_coarse!=N_coarse-1&&idx_y_coarse!=0&&idx_y_coarse!=N_coarse-1)
field_coarse[idx_coarse] = (float)(1./16.*(field_fine[idx_fine-N_fine-1]+field_fine[idx_fine-N_fine+1]+field_fine[idx_fine+N_fine-1]+field_fine[idx_fine+N_fine+1]) + 1./8.*(field_fine[idx_fine-N_fine]+field_fine[idx_fine-1]+field_fine[idx_fine+1]+field_fine[idx_fine+N_fine]) + 1./4.*field_fine[idx_fine]);
else
field_coarse[idx_coarse] = (float)(field_fine[idx_fine]);
}
}
int main(void)
{
int N, N_level, inner_loop, N_block, display_interval, tpb_x, tpb_y, bpg_x, bpg_y;
float preparation_time, computation_time, total_time;
float alpha_f, beta_f;
double photon_mass, dx, criteria;
double alpha_d, beta_d, error;
long iter, iter_max;
int *dimension_level;
float *p_f, *A_p_f, *r_temp_f;
float **field_level_f, **r_level_f;
double *rho, *p_d, *A_p_d, *field_analytic, *error_block, *r_temp_d;
double *field_d, *r_d;
size_t size_lattice_d, size_lattice_f, size_sm;
hipEvent_t start, stop;
FILE* output_field, *output_rho;
printf("Solve the Poission problem using CG by GPU.\n\n");
printf("Enter the latttice size (N,N) .");
scanf("%d", &N);
printf("The lattice size is (%d,%d).\n", N, N);
printf("Set the depth of the V process.\n");
scanf("%d",&N_level);
printf("The depth of V process is %d .\n", N_level);
printf("Set the number of inner-loop.\n");
scanf("%d",&inner_loop);
printf("The number of inner-loop is %d .\n", inner_loop);
printf("Set the photon mass.\n");
scanf("%lf", &photon_mass);
printf("The photon mass is %.4e .\n", photon_mass);
printf("Set the maximum iteration times.\n");
scanf("%ld", &iter_max);
printf("The maximum iteration times is %ld .\n", iter_max);
printf("Set the stopping criteria.\n");
scanf("%lf", &criteria);
printf("The stopping criteria is %.4e .\n", criteria);
printf("Set the display interval during iterations.\n");
scanf("%d", &display_interval);
printf("The display interval is set to be %d .\n", display_interval);
printf("Set the GPU threads per block (tx,ty). \n");
scanf("%d %d", &tpb_x, &tpb_y);
printf("Threads per block for GPU is (%d,%d) .\n", tpb_x, tpb_y);
printf("The block per grid will be set automatically.");
bpg_x = (N+tpb_x-1)/tpb_x;
bpg_y = (N+tpb_y-1)/tpb_y;
printf("Blocks per grid for GPU is (%d,%d) .\n", bpg_x, bpg_y);
printf("\n");
printf("Start Preparation...\n");
dx = 1./(N-1);
N_block = bpg_x*bpg_y;
size_lattice_d = N*N*sizeof(double);
size_lattice_f = N*N*sizeof(float);
size_sm = tpb_x*tpb_y*sizeof(double);
output_field = fopen("analytical_field_distribution_CG.txt","w");
output_rho = fopen("charge_distribution_CG.txt","w");
hipSetDevice(0);
hipEventCreate(&start);
hipEventCreate(&stop);
dim3 tpb(tpb_x,tpb_y);
dim3 bpg(bpg_x,bpg_y);
cublasMath_t mode = CUBLAS_TENSOR_OP_MATH;
hipblasPointerMode_t mode_pt = HIPBLAS_POINTER_MODE_HOST;
hipblasHandle_t handle;
hipblasCreate(&handle);
cublasSetMathMode(handle, mode);
hipblasSetPointerMode(handle, mode_pt);
hipEventRecord(start,0);
hipMallocManaged(&p_f, size_lattice_f);
hipMallocManaged(&A_p_f, size_lattice_f);
hipMallocManaged(&p_d, size_lattice_d);
hipMallocManaged(&A_p_d, size_lattice_d);
hipMallocManaged(&field_analytic, size_lattice_d);
hipMallocManaged(&rho, size_lattice_d);
hipMallocManaged(&error_block, N_block*sizeof(double));
hipMallocManaged(&dimension_level, (N_level+1)*sizeof(int));
hipMallocManaged(&r_temp_f, size_lattice_f);
hipMallocManaged(&r_temp_d, size_lattice_d);
hipMallocManaged(&field_d, size_lattice_d);
hipMallocManaged(&r_d, size_lattice_d);
/* allocate the memory for multi-grid */
field_level_f = (float**)malloc(N_level*sizeof(float*));
r_level_f = (float**)malloc(N_level*sizeof(float*));
int dimension = (N-1)/2;
for (int level=0; level<N_level; level++)
{
hipMallocManaged(&field_level_f[level], (dimension+1)*(dimension+1)*sizeof(float));
hipMallocManaged(&r_level_f[level], (dimension+1)*(dimension+1)*sizeof(float));
dimension_level[level] = dimension + 1;
dimension /= 2;
}
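// Level l holds a grid of dimension_level[l]^2 points with
// dimension_level[l] = (N-1)/2^(l+1) + 1, i.e. each descent halves the
// number of intervals per side.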
hipLaunchKernelGGL(( INITIALIZE), dim3(bpg),dim3(tpb), 0, 0, N, dx, rho, field_d, field_analytic);
hipLaunchKernelGGL(( EVALUATE_ERROR_BLOCK), dim3(bpg),dim3(tpb),size_sm, 0, N, rho, field_d, error_block);
double norm;
hipblasDdot(handle, N*N, rho, 1, rho, 1, &norm);
norm = sqrt(norm);
hipMemcpy(r_d, rho, size_lattice_d, hipMemcpyDeviceToDevice);
// if (N_level==0)
// hipMemcpy(p_d, rho, size_lattice_d, hipMemcpyDeviceToDevice);
hipMemcpy(p_d, rho, size_lattice_d, hipMemcpyDeviceToDevice);
hipMemset(field_d, 0, size_lattice_d);
FPRINTF(output_field, N, 1., field_analytic);
FPRINTF(output_rho, N, pow(dx,-2.), rho);
printf("Preparation ends.\n");
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&preparation_time, start, stop);
printf("Total preparation time is %.4f ms.\n\n", preparation_time);
hipEventRecord(start,0);
error = EVALUATE_ERROR(N, N_block, error_block);
float temp_f, norm_in, error_in = 1.0;
double temp_d;
printf("Starts computation with error = %.8e...\n", sqrt(error)/norm);
iter = 0;
int dl;
float one_f = 1.;
float mone_f = -1.;
float zero_f = 0.;
double one_d = 1.;
double mone_d = -1.;
double zero_d = 0.;
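// Outer loop: when N_level > 0 each iteration runs one V-cycle in mixed
// precision: inner_loop CG smoothing steps in double on the fine grid,
// residual restriction down through the float levels (smoothing on each),
// correction interpolated back up, then a fresh fine-grid residual with
// the search direction reset to p = r before the closing CG step below.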
while (sqrt(error)/norm>criteria&&iter<iter_max)
{
if (N_level!=0)
{
for (int loop=0; loop<inner_loop; loop++)
{
hipLaunchKernelGGL(( LAPLACIAN_DOUBLE), dim3(bpg),dim3(tpb), 0, 0, N, dx, photon_mass, 1., p_d, A_p_d);
hipblasDdot(handle, N*N, p_d, 1, A_p_d, 1, &temp_d);
alpha_d = error/temp_d;
temp_d = -alpha_d;
hipblasDaxpy(handle, N*N, &temp_d, A_p_d, 1, r_d, 1);
hipblasDaxpy(handle, N*N, &alpha_d, p_d, 1, field_d, 1);
hipblasDdot(handle, N*N, r_d, 1, r_d, 1, &temp_d);
beta_d = temp_d/error;
hipLaunchKernelGGL(( DAXPY_DOUBLE), dim3(bpg),dim3(tpb), 0, 0, N, beta_d, p_d, r_d);
error = temp_d;
}
dl = dimension_level[0];
hipLaunchKernelGGL(( RESTRICT_2D_FIRST), dim3(bpg),dim3(tpb), 0, 0, dl, r_d, r_level_f[0]);
for (int l=0; l<N_level-1; l++)
{
hipMemset(field_level_f[l], 0, dl*dl*sizeof(float));
hipMemcpy(p_f, r_level_f[l], dl*dl*sizeof(float), hipMemcpyDeviceToDevice);
hipblasSdot(handle, dl*dl, r_level_f[l], 1, r_level_f[l], 1, &error_in);
for (int loop=0; loop<inner_loop; loop++)
{
hipLaunchKernelGGL(( LAPLACIAN_FLOAT), dim3(bpg),dim3(tpb), 0, 0, dl, (float)(dx), (float)(photon_mass), 1./powf(2.,2.*(l+1)), p_f, A_p_f);
hipblasSdot(handle, dl*dl, p_f, 1, A_p_f, 1, &temp_f);
alpha_f = error_in/temp_f;
temp_f = -alpha_f;
hipblasSaxpy(handle, dl*dl, &temp_f, A_p_f, 1, r_level_f[l], 1);
hipblasSaxpy(handle, dl*dl, &alpha_f, p_f, 1, field_level_f[l], 1);
hipblasSdot(handle, dl*dl, r_level_f[l], 1, r_level_f[l], 1, &temp_f);
beta_f = temp_f/error_in;
hipLaunchKernelGGL(( DAXPY_FLOAT), dim3(bpg),dim3(tpb), 0, 0, dl, beta_f, p_f, r_level_f[l]);
error_in = temp_f;
}
// printf("%.4e\n", error_in);
dl = dimension_level[l+1];
hipLaunchKernelGGL(( RESTRICT_2D), dim3(bpg),dim3(tpb), 0, 0, dl, r_level_f[l], r_level_f[l+1]);
}
dl = dimension_level[N_level-1];
hipMemset(field_level_f[N_level-1], 0, dl*dl*sizeof(float));
hipblasSdot(handle, dl*dl, r_level_f[N_level-1], 1, r_level_f[N_level-1], 1, &error_in);
norm_in = error_in;
hipMemcpy(p_f, r_level_f[N_level-1], dl*dl*sizeof(float), hipMemcpyDeviceToDevice);
for (int loop=0; loop<inner_loop; loop++)
{
hipLaunchKernelGGL(( LAPLACIAN_FLOAT), dim3(bpg),dim3(tpb), 0, 0, dl, (float)(dx), (float)(photon_mass), 1./powf(2.,2.*(N_level)), p_f, A_p_f);
hipblasSdot(handle, dl*dl, p_f, 1, A_p_f, 1, &temp_f);
alpha_f = error_in/temp_f;
temp_f = -alpha_f;
hipblasSaxpy(handle, dl*dl, &temp_f, A_p_f, 1, r_level_f[N_level-1], 1);
hipblasSaxpy(handle, dl*dl, &alpha_f, p_f, 1, field_level_f[N_level-1], 1);
hipblasSdot(handle, dl*dl, r_level_f[N_level-1], 1, r_level_f[N_level-1], 1, &temp_f);
beta_f = temp_f/error_in;
hipLaunchKernelGGL(( DAXPY_FLOAT), dim3(bpg),dim3(tpb), 0, 0, dl, beta_f, p_f, r_level_f[N_level-1]);
error_in = temp_f;
}
for (int l=N_level-2; l>=0; l--)
{
dl = dimension_level[l];
hipMemcpy(r_temp_f, r_level_f[l], dl*dl*sizeof(float), hipMemcpyDeviceToDevice);
hipLaunchKernelGGL(( INTERPOLATE_2D), dim3(bpg),dim3(tpb), 0, 0, dl, field_level_f[l+1], p_f);
hipblasSaxpy(handle, dl*dl, &one_f, p_f, 1, field_level_f[l], 1);
hipLaunchKernelGGL(( LAPLACIAN_FLOAT), dim3(bpg),dim3(tpb), 0, 0, dl, (float)(dx), (float)(photon_mass), 1./powf(2.,2.*(l+1)), field_level_f[l], r_level_f[l]);
hipLaunchKernelGGL(( DAXPY_FLOAT), dim3(bpg),dim3(tpb), 0, 0, dl, mone_f, r_level_f[l], r_temp_f);
hipblasSdot(handle, dl*dl, r_level_f[l], 1, r_level_f[l], 1, &error_in);
norm_in = error_in;
hipMemcpy(p_f, r_level_f[l], dl*dl*sizeof(float), hipMemcpyDeviceToDevice);
for (int loop=0; loop<inner_loop; loop++)
{
hipLaunchKernelGGL(( LAPLACIAN_FLOAT), dim3(bpg),dim3(tpb), 0, 0, dl, (float)(dx), (float)(photon_mass), 1./powf(2.,2.*(l+1)), p_f, A_p_f);
hipblasSdot(handle, dl*dl, p_f, 1, A_p_f, 1, &temp_f);
alpha_f = error_in/temp_f;
temp_f = -alpha_f;
hipblasSaxpy(handle, dl*dl, &temp_f, A_p_f, 1, r_level_f[l], 1);
hipblasSaxpy(handle, dl*dl, &alpha_f, p_f, 1, field_level_f[l], 1);
hipblasSdot(handle, dl*dl, r_level_f[l], 1, r_level_f[l], 1, &temp_f);
beta_f = temp_f/error_in;
hipLaunchKernelGGL(( DAXPY_FLOAT), dim3(bpg),dim3(tpb), 0, 0, dl, beta_f, p_f, r_level_f[l]);
error_in = temp_f;
}
}
hipLaunchKernelGGL(( INTERPOLATE_2D_LAST), dim3(bpg),dim3(tpb), 0, 0, N, field_level_f[0], p_d);
hipblasDaxpy(handle, N*N, &one_d, p_d, 1, field_d, 1);
hipLaunchKernelGGL(( LAPLACIAN_DOUBLE), dim3(bpg),dim3(tpb), 0, 0, N, dx, photon_mass, 1., field_d, r_d);
hipLaunchKernelGGL(( DAXPY_DOUBLE), dim3(bpg),dim3(tpb), 0, 0, N, mone_d, r_d, rho);
hipblasDdot(handle, N*N, r_d, 1, r_d, 1, &error);
hipMemcpy(p_d, r_d, N*N*sizeof(double), hipMemcpyDeviceToDevice);
}
else
dl = N;
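// With or without the V-cycle, every outer iteration finishes with one
// standard double-precision CG step on the fine grid.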
hipLaunchKernelGGL(( LAPLACIAN_DOUBLE), dim3(bpg),dim3(tpb), 0, 0, N, dx, photon_mass, 1., p_d, A_p_d);
hipblasDdot(handle, N*N, p_d, 1, A_p_d, 1, &temp_d);
alpha_d = error/temp_d;
temp_d = -alpha_d;
hipblasDaxpy(handle, N*N, &temp_d, A_p_d, 1, r_d, 1);
hipblasDaxpy(handle, N*N, &alpha_d, p_d, 1, field_d, 1);
hipblasDdot(handle, N*N, r_d, 1, r_d, 1, &temp_d);
beta_d = temp_d/error;
hipLaunchKernelGGL(( DAXPY_DOUBLE), dim3(bpg),dim3(tpb), 0, 0, N, beta_d, p_d, r_d);
error = temp_d;
iter += 1;
if (iter%display_interval==0)
printf("Iteration = %ld , error = %.8e .\n", iter, sqrt(error)/norm);
}
output_field = fopen("simulated_field_distribution_GPU_MGCG_MIXED.txt","w");
FPRINTF(output_field, N, 1., field_d);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&computation_time, start, stop);
printf("Computation time is %.4f ms.\n", computation_time);
total_time = preparation_time + computation_time;
printf("Total iteration is %ld ; total time is %.4f ms.\n", iter, total_time);
hipFree(p_d);
hipFree(A_p_d);
hipFree(p_f);
hipFree(A_p_f);
hipFree(field_analytic);
hipFree(rho);
hipFree(error_block);
hipFree(dimension_level);
hipFree(r_temp_f);
hipFree(r_temp_d);
hipFree(field_d);
hipFree(r_d);
hipblasDestroy(handle);
fclose(output_field);
fclose(output_rho);
for (int level=0; level<N_level; level++) { hipFree(field_level_f[level]); hipFree(r_level_f[level]); }
free(field_level_f);
free(r_level_f);
return EXIT_SUCCESS;
}
double EVALUATE_ERROR(int N, int N_block, double* error_block)
{
double error = 0.0;
for (int i=0; i<N_block; i++)
error += error_block[i];
return error;
}
void FPRINTF(FILE *output_file, int N, double scale, double *array)
{
for (int j=0; j<N; j++)
{
for (int i=0; i<N; i++)
fprintf(output_file, "%.8e\t", scale*array[i+j*N]);
fprintf(output_file, "\n");
}
}
|
00962608c7d81a39e00b2e066607689c27d3e1a7.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <math.h>
#include <cuda_runtime.h>
#include <cublas_v2.h>
double EVALUATE_ERROR(int, int, double*);
void FPRINTF(FILE*, int N, double, double*);
__global__ void INITIALIZE(int N, double dx, double* rho, double* field, double* field_analytic)
{
int idx_x = threadIdx.x + blockIdx.x*blockDim.x;
int idx_y = threadIdx.y + blockIdx.y*blockDim.y;
if (idx_x<N&&idx_y<N)
{
int idx = idx_x + idx_y*N;
double x = idx_x*dx;
double y = idx_y*dx;
field_analytic[idx] = x*(1.-x)*y*(1.-y)*exp(x-y);
if (idx_x!=0&&idx_x!=N-1&&idx_y!=0&&idx_y!=N-1)
{
field[idx] = 0.0;
rho[idx] = (2.*x*(y-1)*(y-2.*x+x*y+2)*exp(x-y))*dx*dx; // Note that rho has been multiplied by dx^2!
}
else
{
field[idx] = field_analytic[idx];
rho[idx] = 0.0;
}
}
}
__global__ void EVALUATE_ERROR_BLOCK(int N, double* rho, double* field, double* error_block)
{
extern __shared__ double sm[];
int idx_x = threadIdx.x + blockIdx.x*blockDim.x;
int idx_y = threadIdx.y + blockIdx.y*blockDim.y;
int idx_sm = threadIdx.x + blockDim.x*threadIdx.y;
sm[idx_sm] = 0.0;
__syncthreads();
if (idx_x<N&&idx_y<N)
{
int idx = idx_x + N*idx_y;
if (idx_x!=0&&idx_x!=N-1&&idx_y!=0&&idx_y!=N-1)
{
int L = idx_x-1 + idx_y*N;
int R = idx_x+1 + idx_y*N;
int U = idx_x + (idx_y+1)*N;
int D = idx_x + (idx_y-1)*N;
sm[idx_sm] = pow((field[L]+field[R]+field[U]+field[D]-4.*field[idx])-rho[idx], 2.);
}
__syncthreads();
for (int shift=blockDim.x*blockDim.y/2; shift>0; shift/=2)
{
if (idx_sm<shift)
sm[idx_sm] += sm[idx_sm+shift];
__syncthreads();
}
if (idx_sm==0)
error_block[blockIdx.x+blockIdx.y*gridDim.x] = sm[0];
}
}
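// 5-point Laplacian with a photon-mass term, applied on interior points:
// A*p = factor * (p_L + p_R + p_U + p_D - (4 + (m*dx)^2) * p).
// Boundary points are simply scaled by `factor`, acting as the identity there.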
__global__ void LAPLACIAN_DOUBLE(int N, double dx, double photon_mass, double factor, double* p, double* A_p)
{
int idx_x = threadIdx.x + blockDim.x*blockIdx.x;
int idx_y = threadIdx.y + blockDim.y*blockIdx.y;
if (idx_x<N&&idx_y<N)
{
int idx = idx_x + N*idx_y;
if (idx_x!=0&&idx_x!=N-1&&idx_y!=0&&idx_y!=N-1)
{
int L = idx_x-1 + idx_y*N;
int R = idx_x+1 + idx_y*N;
int U = idx_x + (idx_y+1)*N;
int D = idx_x + (idx_y-1)*N;
A_p[idx] = factor*(p[L]+p[R]+p[U]+p[D]-(4.+pow(photon_mass*dx,2.))*p[idx]);
}
else
A_p[idx] = factor*p[idx];
}
}
__global__ void LAPLACIAN_FLOAT(int N, float dx, float photon_mass, float factor, float* p, float* A_p)
{
int idx_x = threadIdx.x + blockDim.x*blockIdx.x;
int idx_y = threadIdx.y + blockDim.y*blockIdx.y;
if (idx_x<N&&idx_y<N)
{
int idx = idx_x + N*idx_y;
if (idx_x!=0&&idx_x!=N-1&&idx_y!=0&&idx_y!=N-1)
{
int L = idx_x-1 + idx_y*N;
int R = idx_x+1 + idx_y*N;
int U = idx_x + (idx_y+1)*N;
int D = idx_x + (idx_y-1)*N;
A_p[idx] = factor*(p[L]+p[R]+p[U]+p[D]-(4.+powf(photon_mass*dx,2.))*p[idx]);
}
else
A_p[idx] = factor*p[idx];
}
}
__global__ void DAXPY_DOUBLE(int N, double c, double *A, double *B)
{
int idx_x = threadIdx.x + blockDim.x*blockIdx.x;
int idx_y = threadIdx.y + blockDim.y*blockIdx.y;
if (idx_x<N&&idx_y<N)
{
int idx = idx_x + N*idx_y;
A[idx] = c*A[idx] + B[idx];
}
}
__global__ void DAXPY_FLOAT(int N, float c, float *A, float *B)
{
int idx_x = threadIdx.x + blockDim.x*blockIdx.x;
int idx_y = threadIdx.y + blockDim.y*blockIdx.y;
if (idx_x<N&&idx_y<N)
{
int idx = idx_x + N*idx_y;
A[idx] = c*A[idx] + B[idx];
}
}
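// Prolongation: bilinear interpolation from a coarse grid of (N-1)/2 + 1
// points per side up to the fine grid. The _LAST variant additionally widens
// the float correction back to double for the outer solver.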
__global__ void INTERPOLATE_2D_LAST(int dimension, float* field_coarse, double* field_fine)
{
int N_fine = dimension;
int idx_x_fine = threadIdx.x + blockDim.x*blockIdx.x;
int idx_y_fine = threadIdx.y + blockDim.y*blockIdx.y;
if (idx_x_fine<N_fine&&idx_y_fine<N_fine)
{
int idx_fine = idx_x_fine + N_fine*idx_y_fine;
int N_coarse = (N_fine-1)/2 + 1;
int idx_x_coarse = idx_x_fine/2;
int idx_y_coarse = idx_y_fine/2;
int idx_coarse = idx_x_coarse + N_coarse*idx_y_coarse;
if (idx_x_fine%2==0&&idx_y_fine%2==0)
field_fine[idx_fine] = (double)(field_coarse[idx_coarse]);
else if (idx_x_fine%2==1&&idx_y_fine%2==0)
field_fine[idx_fine] = (double)(0.5*(field_coarse[idx_coarse]+field_coarse[idx_coarse+1]));
else if (idx_x_fine%2==0&&idx_y_fine%2==1)
field_fine[idx_fine] = (double)(0.5*(field_coarse[idx_coarse]+field_coarse[idx_coarse+N_coarse]));
else
field_fine[idx_fine] = (double)(0.25*(field_coarse[idx_coarse]+field_coarse[idx_coarse+1]+field_coarse[idx_coarse+N_coarse]+field_coarse[idx_coarse+N_coarse+1]));
}
}
__global__ void INTERPOLATE_2D(int dimension, float* field_coarse, float* field_fine)
{
int N_fine = dimension;
int idx_x_fine = threadIdx.x + blockDim.x*blockIdx.x;
int idx_y_fine = threadIdx.y + blockDim.y*blockIdx.y;
if (idx_x_fine<N_fine&&idx_y_fine<N_fine)
{
int idx_fine = idx_x_fine + N_fine*idx_y_fine;
int N_coarse = (N_fine-1)/2 + 1;
int idx_x_coarse = idx_x_fine/2;
int idx_y_coarse = idx_y_fine/2;
int idx_coarse = idx_x_coarse + N_coarse*idx_y_coarse;
if (idx_x_fine%2==0&&idx_y_fine%2==0)
field_fine[idx_fine] = field_coarse[idx_coarse];
else if (idx_x_fine%2==1&&idx_y_fine%2==0)
field_fine[idx_fine] = 0.5*(field_coarse[idx_coarse]+field_coarse[idx_coarse+1]);
else if (idx_x_fine%2==0&&idx_y_fine%2==1)
field_fine[idx_fine] = 0.5*(field_coarse[idx_coarse]+field_coarse[idx_coarse+N_coarse]);
else
field_fine[idx_fine] = 0.25*(field_coarse[idx_coarse]+field_coarse[idx_coarse+1]+field_coarse[idx_coarse+N_coarse]+field_coarse[idx_coarse+N_coarse+1]);
}
}
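// Restriction: fine -> coarse transfer. Interior coarse points average the
// fine point (weight 1/4) with its neighbours at offsets +/-1..4 in the
// flattened index (weights 1/8 and 1/16); boundary points are copied through.
// The _FIRST variant also narrows the residual from double to float.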
__global__ void RESTRICT_2D_FIRST(int dimension, double* field_fine, float* field_coarse)
{
int N_coarse = dimension;
int idx_x_coarse = threadIdx.x + blockDim.x*blockIdx.x;
int idx_y_coarse = threadIdx.y + blockDim.y*blockIdx.y;
if (idx_x_coarse<N_coarse&&idx_y_coarse<N_coarse)
{
int idx_coarse = idx_x_coarse + N_coarse*idx_y_coarse;
int N_fine = (N_coarse-1)*2 + 1;
int idx_x_fine = idx_x_coarse*2;
int idx_y_fine = idx_y_coarse*2;
int idx_fine = idx_x_fine + idx_y_fine*N_fine;
if (idx_x_coarse!=0&&idx_x_coarse!=N_coarse-1&&idx_y_coarse!=0&&idx_y_coarse!=N_coarse-1)
field_coarse[idx_coarse] = (float)(1./16.*(field_fine[idx_fine-4]+field_fine[idx_fine-2]+field_fine[idx_fine+2]+field_fine[idx_fine+4]) + 1./8.*(field_fine[idx_fine-3]+field_fine[idx_fine-1]+field_fine[idx_fine+1]+field_fine[idx_fine+3]) + 1./4.*field_fine[idx_fine]);
else
field_coarse[idx_coarse] = (float)(field_fine[idx_fine]);
}
}
__global__ void RESTRICT_2D(int dimension, float* field_fine, float* field_coarse)
{
int N_coarse = dimension;
int idx_x_coarse = threadIdx.x + blockDim.x*blockIdx.x;
int idx_y_coarse = threadIdx.y + blockDim.y*blockIdx.y;
if (idx_x_coarse<N_coarse&&idx_y_coarse<N_coarse)
{
int idx_coarse = idx_x_coarse + N_coarse*idx_y_coarse;
int N_fine = (N_coarse-1)*2 + 1;
int idx_x_fine = idx_x_coarse*2;
int idx_y_fine = idx_y_coarse*2;
int idx_fine = idx_x_fine + idx_y_fine*N_fine;
if (idx_x_coarse!=0&&idx_x_coarse!=N_coarse-1&&idx_y_coarse!=0&&idx_y_coarse!=N_coarse-1)
field_coarse[idx_coarse] = (float)(1./16.*(field_fine[idx_fine-4]+field_fine[idx_fine-2]+field_fine[idx_fine+2]+field_fine[idx_fine+4]) + 1./8.*(field_fine[idx_fine-3]+field_fine[idx_fine-1]+field_fine[idx_fine+1]+field_fine[idx_fine+3]) + 1./4.*field_fine[idx_fine]);
else
field_coarse[idx_coarse] = (float)(field_fine[idx_fine]);
}
}
int main(void)
{
int N, N_level, inner_loop, N_block, display_interval, tpb_x, tpb_y, bpg_x, bpg_y;
float preparation_time, computation_time, total_time;
float alpha_f, beta_f;
double photon_mass, dx, criteria;
double alpha_d, beta_d, error;
long iter, iter_max;
int *dimension_level;
float *p_f, *A_p_f, *r_temp_f;
float **field_level_f, **r_level_f;
double *rho, *p_d, *A_p_d, *field_analytic, *error_block, *r_temp_d;
double *field_d, *r_d;
size_t size_lattice_d, size_lattice_f, size_sm;
cudaEvent_t start, stop;
FILE* output_field, *output_rho;
printf("Solve the Poission problem using CG by GPU.\n\n");
printf("Enter the latttice size (N,N) .");
scanf("%d", &N);
printf("The lattice size is (%d,%d).\n", N, N);
printf("Set the depth of the V process.\n");
scanf("%d",&N_level);
printf("The depth of V process is %d .\n", N_level);
printf("Set the number of inner-loop.\n");
scanf("%d",&inner_loop);
printf("The number of inner-loop is %d .\n", inner_loop);
printf("Set the photon mass.\n");
scanf("%lf", &photon_mass);
printf("The photon mass is %.4e .\n", photon_mass);
printf("Set the maximum iteration times.\n");
scanf("%ld", &iter_max);
printf("The maximum iteration times is %ld .\n", iter_max);
printf("Set the stopping criteria.\n");
scanf("%lf", &criteria);
printf("The stopping criteria is %.4e .\n", criteria);
printf("Set the display interval during iterations.\n");
scanf("%d", &display_interval);
printf("The display interval is set to be %d .\n", display_interval);
printf("Set the GPU threads per block (tx,ty). \n");
scanf("%d %d", &tpb_x, &tpb_y);
printf("Threads per block for GPU is (%d,%d) .\n", tpb_x, tpb_y);
printf("The block per grid will be set automatically.");
bpg_x = (N+tpb_x-1)/tpb_x;
bpg_y = (N+tpb_y-1)/tpb_y;
printf("Blocks per grid for GPU is (%d,%d) .\n", bpg_x, bpg_y);
printf("\n");
printf("Start Preparation...\n");
dx = 1./(N-1);
N_block = bpg_x*bpg_y;
size_lattice_d = N*N*sizeof(double);
size_lattice_f = N*N*sizeof(float);
size_sm = tpb_x*tpb_y*sizeof(double);
output_field = fopen("analytical_field_distribution_CG.txt","w");
output_rho = fopen("charge_distribution_CG.txt","w");
cudaSetDevice(0);
cudaEventCreate(&start);
cudaEventCreate(&stop);
dim3 tpb(tpb_x,tpb_y);
dim3 bpg(bpg_x,bpg_y);
cublasMath_t mode = CUBLAS_TENSOR_OP_MATH;
cublasPointerMode_t mode_pt = CUBLAS_POINTER_MODE_HOST;
cublasHandle_t handle;
cublasCreate(&handle);
cublasSetMathMode(handle, mode);
cublasSetPointerMode(handle, mode_pt);
cudaEventRecord(start,0);
cudaMallocManaged(&p_f, size_lattice_f);
cudaMallocManaged(&A_p_f, size_lattice_f);
cudaMallocManaged(&p_d, size_lattice_d);
cudaMallocManaged(&A_p_d, size_lattice_d);
cudaMallocManaged(&field_analytic, size_lattice_d);
cudaMallocManaged(&rho, size_lattice_d);
cudaMallocManaged(&error_block, N_block*sizeof(double));
cudaMallocManaged(&dimension_level, (N_level+1)*sizeof(int));
cudaMallocManaged(&r_temp_f, size_lattice_f);
cudaMallocManaged(&r_temp_d, size_lattice_d);
cudaMallocManaged(&field_d, size_lattice_d);
cudaMallocManaged(&r_d, size_lattice_d);
/* allocate the memory for multi-grid */
field_level_f = (float**)malloc(N_level*sizeof(float*));
r_level_f = (float**)malloc(N_level*sizeof(float*));
int dimension = (N-1)/2;
for (int level=0; level<N_level; level++)
{
cudaMallocManaged(&field_level_f[level], (dimension+1)*(dimension+1)*sizeof(float));
cudaMallocManaged(&r_level_f[level], (dimension+1)*(dimension+1)*sizeof(float));
dimension_level[level] = dimension + 1;
dimension /= 2;
}
INITIALIZE<<<bpg,tpb>>>(N, dx, rho, field_d, field_analytic);
EVALUATE_ERROR_BLOCK<<<bpg,tpb,size_sm>>>(N, rho, field_d, error_block);
double norm;
cublasDdot(handle, N*N, rho, 1, rho, 1, &norm);
norm = sqrt(norm);
cudaMemcpy(r_d, rho, size_lattice_d, cudaMemcpyDeviceToDevice);
// if (N_level==0)
// cudaMemcpy(p_d, rho, size_lattice_d, cudaMemcpyDeviceToDevice);
cudaMemcpy(p_d, rho, size_lattice_d, cudaMemcpyDeviceToDevice);
cudaMemset(field_d, 0, size_lattice_d);
FPRINTF(output_field, N, 1., field_analytic);
FPRINTF(output_rho, N, pow(dx,-2.), rho);
printf("Preparation ends.\n");
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&preparation_time, start, stop);
printf("Total preparation time is %.4f ms.\n\n", preparation_time);
cudaEventRecord(start,0);
error = EVALUATE_ERROR(N, N_block, error_block);
float temp_f, norm_in, error_in = 1.0;
double temp_d;
printf("Starts computation with error = %.8e...\n", sqrt(error)/norm);
iter = 0;
int dl;
float one_f = 1.;
float mone_f = -1.;
float zero_f = 0.;
double one_d = 1.;
double mone_d = -1.;
double zero_d = 0.;
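// Outer loop: double-precision CG on the fine grid. When N_level > 0, each
// pass first runs `inner_loop` CG sweeps, then a single-precision multigrid
// V-cycle (restrict the residual, CG-smooth on each level, solve the coarsest
// level, prolongate the corrections back up), and finally rebuilds the true
// residual r = rho - A*field and restarts the search direction from it.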
while (sqrt(error)/norm>criteria&&iter<iter_max)
{
if (N_level!=0)
{
for (int loop=0; loop<inner_loop; loop++)
{
LAPLACIAN_DOUBLE<<<bpg,tpb>>>(N, dx, photon_mass, 1., p_d, A_p_d);
cublasDdot(handle, N*N, p_d, 1, A_p_d, 1, &temp_d);
alpha_d = error/temp_d;
temp_d = -alpha_d;
cublasDaxpy(handle, N*N, &temp_d, A_p_d, 1, r_d, 1);
cublasDaxpy(handle, N*N, &alpha_d, p_d, 1, field_d, 1);
cublasDdot(handle, N*N, r_d, 1, r_d, 1, &temp_d);
beta_d = temp_d/error;
DAXPY_DOUBLE<<<bpg,tpb>>>(N, beta_d, p_d, r_d);
error = temp_d;
}
dl = dimension_level[0];
RESTRICT_2D_FIRST<<<bpg,tpb>>>(dl, r_d, r_level_f[0]);
for (int l=0; l<N_level-1; l++)
{
cudaMemset(field_level_f[l], 0, dl*dl*sizeof(float));
cudaMemcpy(p_f, r_level_f[l], dl*dl*sizeof(float), cudaMemcpyDeviceToDevice);
cublasSdot(handle, dl*dl, r_level_f[l], 1, r_level_f[l], 1, &error_in);
for (int loop=0; loop<inner_loop; loop++)
{
LAPLACIAN_FLOAT<<<bpg,tpb>>>(dl, (float)(dx), (float)(photon_mass), 1./powf(2.,2.*(l+1)), p_f, A_p_f);
cublasSdot(handle, dl*dl, p_f, 1, A_p_f, 1, &temp_f);
alpha_f = error_in/temp_f;
temp_f = -alpha_f;
cublasSaxpy(handle, dl*dl, &temp_f, A_p_f, 1, r_level_f[l], 1);
cublasSaxpy(handle, dl*dl, &alpha_f, p_f, 1, field_level_f[l], 1);
cublasSdot(handle, dl*dl, r_level_f[l], 1, r_level_f[l], 1, &temp_f);
beta_f = temp_f/error_in;
DAXPY_FLOAT<<<bpg,tpb>>>(dl, beta_f, p_f, r_level_f[l]);
error_in = temp_f;
}
// printf("%.4e\n", error_in);
dl = dimension_level[l+1];
RESTRICT_2D<<<bpg,tpb>>>(dl, r_level_f[l], r_level_f[l+1]);
}
dl = dimension_level[N_level-1];
cudaMemset(field_level_f[N_level-1], 0, dl*dl*sizeof(float));
cublasSdot(handle, dl*dl, r_level_f[N_level-1], 1, r_level_f[N_level-1], 1, &error_in);
norm_in = error_in;
cudaMemcpy(p_f, r_level_f[N_level-1], dl*dl*sizeof(float), cudaMemcpyDeviceToDevice);
for (int loop=0; loop<inner_loop; loop++)
{
LAPLACIAN_FLOAT<<<bpg,tpb>>>(dl, (float)(dx), (float)(photon_mass), 1./powf(2.,2.*(N_level)), p_f, A_p_f);
cublasSdot(handle, dl*dl, p_f, 1, A_p_f, 1, &temp_f);
alpha_f = error_in/temp_f;
temp_f = -alpha_f;
cublasSaxpy(handle, dl*dl, &temp_f, A_p_f, 1, r_level_f[N_level-1], 1);
cublasSaxpy(handle, dl*dl, &alpha_f, p_f, 1, field_level_f[N_level-1], 1);
cublasSdot(handle, dl*dl, r_level_f[N_level-1], 1, r_level_f[N_level-1], 1, &temp_f);
beta_f = temp_f/error_in;
DAXPY_FLOAT<<<bpg,tpb>>>(dl, beta_f, p_f, r_level_f[N_level-1]);
error_in = temp_f;
}
for (int l=N_level-2; l>=0; l--)
{
dl = dimension_level[l];
cudaMemcpy(r_temp_f, r_level_f[l], dl*dl*sizeof(float), cudaMemcpyDeviceToDevice);
INTERPOLATE_2D<<<bpg,tpb>>>(dl, field_level_f[l+1], p_f);
cublasSaxpy(handle, dl*dl, &one_f, p_f, 1, field_level_f[l], 1);
LAPLACIAN_FLOAT<<<bpg,tpb>>>(dl, (float)(dx), (float)(photon_mass), 1./powf(2.,2.*(l+1)), field_level_f[l], r_level_f[l]);
DAXPY_FLOAT<<<bpg,tpb>>>(dl, mone_f, r_level_f[l], r_temp_f);
cublasSdot(handle, dl*dl, r_level_f[l], 1, r_level_f[l], 1, &error_in);
norm_in = error_in;
cudaMemcpy(p_f, r_level_f[l], dl*dl*sizeof(float), cudaMemcpyDeviceToDevice);
for (int loop=0; loop<inner_loop; loop++)
{
LAPLACIAN_FLOAT<<<bpg,tpb>>>(dl, (float)(dx), (float)(photon_mass), 1./powf(2.,2.*(l+1)), p_f, A_p_f);
cublasSdot(handle, dl*dl, p_f, 1, A_p_f, 1, &temp_f);
alpha_f = error_in/temp_f;
temp_f = -alpha_f;
cublasSaxpy(handle, dl*dl, &temp_f, A_p_f, 1, r_level_f[l], 1);
cublasSaxpy(handle, dl*dl, &alpha_f, p_f, 1, field_level_f[l], 1);
cublasSdot(handle, dl*dl, r_level_f[l], 1, r_level_f[l], 1, &temp_f);
beta_f = temp_f/error_in;
DAXPY_FLOAT<<<bpg,tpb>>>(dl, beta_f, p_f, r_level_f[l]);
error_in = temp_f;
}
}
INTERPOLATE_2D_LAST<<<bpg,tpb>>>(N, field_level_f[0], p_d);
cublasDaxpy(handle, N*N, &one_d, p_d, 1, field_d, 1);
LAPLACIAN_DOUBLE<<<bpg,tpb>>>(N, dx, photon_mass, 1., field_d, r_d);
DAXPY_DOUBLE<<<bpg,tpb>>>(N, mone_d, r_d, rho);
cublasDdot(handle, N*N, r_d, 1, r_d, 1, &error);
cudaMemcpy(p_d, r_d, N*N*sizeof(double), cudaMemcpyDeviceToDevice);
}
else
dl = N;
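// Standard CG step: alpha = (r,r)/(p,Ap), x += alpha*p, r -= alpha*A*p,
// beta = (r_new,r_new)/(r_old,r_old); DAXPY_DOUBLE then forms p = beta*p + r.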
LAPLACIAN_DOUBLE<<<bpg,tpb>>>(N, dx, photon_mass, 1., p_d, A_p_d);
cublasDdot(handle, N*N, p_d, 1, A_p_d, 1, &temp_d);
alpha_d = error/temp_d;
temp_d = -alpha_d;
cublasDaxpy(handle, N*N, &temp_d, A_p_d, 1, r_d, 1);
cublasDaxpy(handle, N*N, &alpha_d, p_d, 1, field_d, 1);
cublasDdot(handle, N*N, r_d, 1, r_d, 1, &temp_d);
beta_d = temp_d/error;
DAXPY_DOUBLE<<<bpg,tpb>>>(N, beta_d, p_d, r_d);
error = temp_d;
iter += 1;
if (iter%display_interval==0)
printf("Iteration = %ld , error = %.8e .\n", iter, sqrt(error)/norm);
}
output_field = fopen("simulated_field_distribution_GPU_MGCG_MIXED.txt","w");
FPRINTF(output_field, N, 1., field_d);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&computation_time, start, stop);
printf("Computation time is %.4f ms.\n", computation_time);
total_time = preparation_time + computation_time;
printf("Total iteration is %ld ; total time is %.4f ms.\n", iter, total_time);
cudaFree(p_d);
cudaFree(A_p_d);
cudaFree(p_f);
cudaFree(A_p_f);
cudaFree(field_analytic);
cudaFree(rho);
cudaFree(error_block);
cudaFree(dimension_level);
cudaFree(r_temp_f);
cudaFree(r_temp_d);
cudaFree(field_d);
cudaFree(r_d);
cublasDestroy(handle);
fclose(output_field);
fclose(output_rho);
free(field_level_f);
free(r_level_f);
return EXIT_SUCCESS;
}
double EVALUATE_ERROR(int N, int N_block, double* error_block)
{
double error = 0.0;
for (int i=0; i<N_block; i++)
error += error_block[i];
return error;
}
void FPRINTF(FILE *output_file, int N, double scale, double *array)
{
for (int j=0; j<N; j++)
{
for (int i=0; i<N; i++)
fprintf(output_file, "%.8e\t", scale*array[i+j*N]);
fprintf(output_file, "\n");
}
}
|
da269e2fa0815cb2e7cf60e1e00ccf73863e9d05.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "unary_op_grad_impl.cuh"
template <typename T>
__global__ void SqrtGradKernel(const T *input, const T *dout, T *output, const size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
float input_f = static_cast<float>(input[i]);
float dout_f = static_cast<float>(dout[i]);
float res_vmul = dout_f / (2.0 * input_f);
output[i] = static_cast<T>(res_vmul);
}
return;
}
template <typename T>
__global__ void RsqrtGradKernel(const T *input, const T *dout, T *output, const size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
float input_f = static_cast<float>(input[i]);
float dout_f = static_cast<float>(dout[i]);
float res_vmul = input_f * input_f * input_f;
res_vmul = -0.5 * res_vmul * dout_f;
output[i] = static_cast<T>(res_vmul);
}
return;
}
template <typename T>
__global__ void AsinGradKernel(const T *input, const T *dout, T *output, const size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
T one = 1;
T sqt = sqrtf(one - input[i] * input[i]);
output[i] = dout[i] / sqt;
}
return;
}
template <>
__global__ void AsinGradKernel(const half *input, const half *dout, half *output, const size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
half one = 1;
half sqt = hsqrt(one - input[i] * input[i]);
output[i] = dout[i] / sqt;
}
return;
}
template <typename T>
__global__ void ACosGradKernel(const T *input, const T *dout, T *output, const size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
T neg_one = -1;
T one = 1;
T sqt = sqrtf(one - input[i] * input[i]);
output[i] = neg_one * dout[i] / sqt;
}
return;
}
template <>
__global__ void ACosGradKernel(const half *input, const half *dout, half *output, const size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
half neg_one = -1;
half one = 1;
half sqt = hsqrt(one - input[i] * input[i]);
output[i] = neg_one * dout[i] / sqt;
}
return;
}
template <typename T>
__global__ void AtanGradKernel(const T *input, const T *dout, T *output, const size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
T one = 1;
T divisor = one + input[i] * input[i];
output[i] = dout[i] / divisor;
}
return;
}
template <typename T>
__global__ void AsinhGradKernel(const T *input, const T *dout, T *output, const size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
float inputf = static_cast<float>(input[i]);
T coshy = static_cast<T>(coshf(inputf));
output[i] = dout[i] / coshy;
}
return;
}
template <typename T>
__global__ void AcoshGradKernel(const T *input, const T *dout, T *output, const size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
float inputf = static_cast<float>(input[i]);
T sinhy = static_cast<T>(sinhf(inputf));
output[i] = dout[i] / sinhy;
}
return;
}
template <typename T>
__global__ void ReciprocalGradKernel(const T *input, const T *dout, T *output, const size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x) {
float inputf = static_cast<float>(input[i]);
float doutf = static_cast<float>(dout[i]);
float res = -1 * doutf * inputf * inputf;
output[i] = static_cast<T>(res);
}
return;
}
template <typename T>
void SqrtGrad(const T *input, const T *dout, T *output, const size_t count, hipStream_t cuda_stream) {
hipLaunchKernelGGL(( SqrtGradKernel), dim3(GET_BLOCKS(count)), dim3(GET_THREADS), 0, cuda_stream, input, dout, output, count);
return;
}
template <typename T>
void RsqrtGrad(const T *input, const T *dout, T *output, const size_t count, hipStream_t cuda_stream) {
hipLaunchKernelGGL(( RsqrtGradKernel), dim3(GET_BLOCKS(count)), dim3(GET_THREADS), 0, cuda_stream, input, dout, output, count);
return;
}
template <typename T>
void AsinGrad(const T *input, const T *dout, T *output, const size_t count, hipStream_t cuda_stream) {
hipLaunchKernelGGL(( AsinGradKernel), dim3(GET_BLOCKS(count)), dim3(GET_THREADS), 0, cuda_stream, input, dout, output, count);
return;
}
template <typename T>
void ACosGrad(const T *input, const T *dout, T *output, const size_t count, hipStream_t cuda_stream) {
hipLaunchKernelGGL(( ACosGradKernel), dim3(GET_BLOCKS(count)), dim3(GET_THREADS), 0, cuda_stream, input, dout, output, count);
return;
}
template <typename T>
void AtanGrad(const T *input, const T *dout, T *output, const size_t count, hipStream_t cuda_stream) {
hipLaunchKernelGGL(( AtanGradKernel), dim3(GET_BLOCKS(count)), dim3(GET_THREADS), 0, cuda_stream, input, dout, output, count);
return;
}
template <typename T>
void AsinhGrad(const T *input, const T *dout, T *output, const size_t count, hipStream_t cuda_stream) {
hipLaunchKernelGGL(( AsinhGradKernel), dim3(GET_BLOCKS(count)), dim3(GET_THREADS), 0, cuda_stream, input, dout, output, count);
return;
}
template <typename T>
void AcoshGrad(const T *input, const T *dout, T *output, const size_t count, hipStream_t cuda_stream) {
hipLaunchKernelGGL(( AcoshGradKernel), dim3(GET_BLOCKS(count)), dim3(GET_THREADS), 0, cuda_stream, input, dout, output, count);
return;
}
template <typename T>
void ReciprocalGrad(const T *input, const T *dout, T *output, const size_t count, hipStream_t cuda_stream) {
hipLaunchKernelGGL(( ReciprocalGradKernel), dim3(GET_BLOCKS(count)), dim3(GET_THREADS), 0, cuda_stream, input, dout, output, count);
return;
}
template void SqrtGrad<float>(const float *input, const float *dout, float *output, const size_t count,
hipStream_t cuda_stream);
template void RsqrtGrad<float>(const float *input, const float *dout, float *output, const size_t count,
hipStream_t cuda_stream);
template void AsinGrad<float>(const float *input, const float *dout, float *output, const size_t count,
hipStream_t cuda_stream);
template void ACosGrad<float>(const float *input, const float *dout, float *output, const size_t count,
hipStream_t cuda_stream);
template void AtanGrad<float>(const float *input, const float *dout, float *output, const size_t count,
hipStream_t cuda_stream);
template void AsinhGrad<float>(const float *input, const float *dout, float *output, const size_t count,
hipStream_t cuda_stream);
template void AcoshGrad<float>(const float *input, const float *dout, float *output, const size_t count,
hipStream_t cuda_stream);
template void ReciprocalGrad<float>(const float *input, const float *dout, float *output, const size_t count,
hipStream_t cuda_stream);
template void SqrtGrad<half>(const half *input, const half *dout, half *output, const size_t count,
hipStream_t cuda_stream);
template void RsqrtGrad<half>(const half *input, const half *dout, half *output, const size_t count,
hipStream_t cuda_stream);
template void AsinGrad<half>(const half *input, const half *dout, half *output, const size_t count,
hipStream_t cuda_stream);
template void ACosGrad<half>(const half *input, const half *dout, half *output, const size_t count,
hipStream_t cuda_stream);
template void AtanGrad<half>(const half *input, const half *dout, half *output, const size_t count,
hipStream_t cuda_stream);
template void AsinhGrad<half>(const half *input, const half *dout, half *output, const size_t count,
hipStream_t cuda_stream);
template void AcoshGrad<half>(const half *input, const half *dout, half *output, const size_t count,
hipStream_t cuda_stream);
template void ReciprocalGrad<half>(const half *input, const half *dout, half *output, const size_t count,
hipStream_t cuda_stream);
|
da269e2fa0815cb2e7cf60e1e00ccf73863e9d05.cu
|
/**
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "unary_op_grad_impl.cuh"
template <typename T>
__global__ void SqrtGradKernel(const T *input, const T *dout, T *output, const size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
float input_f = static_cast<float>(input[i]);
float dout_f = static_cast<float>(dout[i]);
float res_vmul = dout_f / (2.0 * input_f);
output[i] = static_cast<T>(res_vmul);
}
return;
}
template <typename T>
__global__ void RsqrtGradKernel(const T *input, const T *dout, T *output, const size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
float input_f = static_cast<float>(input[i]);
float dout_f = static_cast<float>(dout[i]);
float res_vmul = input_f * input_f * input_f;
res_vmul = -0.5 * res_vmul * dout_f;
output[i] = static_cast<T>(res_vmul);
}
return;
}
template <typename T>
__global__ void AsinGradKernel(const T *input, const T *dout, T *output, const size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
T one = 1;
T sqt = sqrtf(one - input[i] * input[i]);
output[i] = dout[i] / sqt;
}
return;
}
template <>
__global__ void AsinGradKernel(const half *input, const half *dout, half *output, const size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
half one = 1;
half sqt = hsqrt(one - input[i] * input[i]);
output[i] = dout[i] / sqt;
}
return;
}
template <typename T>
__global__ void ACosGradKernel(const T *input, const T *dout, T *output, const size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
T neg_one = -1;
T one = 1;
T sqt = sqrtf(one - input[i] * input[i]);
output[i] = neg_one * dout[i] / sqt;
}
return;
}
template <>
__global__ void ACosGradKernel(const half *input, const half *dout, half *output, const size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
half neg_one = -1;
half one = 1;
half sqt = hsqrt(one - input[i] * input[i]);
output[i] = neg_one * dout[i] / sqt;
}
return;
}
template <typename T>
__global__ void AtanGradKernel(const T *input, const T *dout, T *output, const size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
T one = 1;
T divisor = one + input[i] * input[i];
output[i] = dout[i] / divisor;
}
return;
}
template <typename T>
__global__ void AsinhGradKernel(const T *input, const T *dout, T *output, const size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
float inputf = static_cast<float>(input[i]);
T coshy = static_cast<T>(coshf(inputf));
output[i] = dout[i] / coshy;
}
return;
}
template <typename T>
__global__ void AcoshGradKernel(const T *input, const T *dout, T *output, const size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
float inputf = static_cast<float>(input[i]);
T sinhy = static_cast<T>(sinhf(inputf));
output[i] = dout[i] / sinhy;
}
return;
}
template <typename T>
__global__ void ReciprocalGradKernel(const T *input, const T *dout, T *output, const size_t count) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x) {
float inputf = static_cast<float>(input[i]);
float doutf = static_cast<float>(dout[i]);
float res = -1 * doutf * inputf * inputf;
output[i] = static_cast<T>(res);
}
return;
}
template <typename T>
void SqrtGrad(const T *input, const T *dout, T *output, const size_t count, cudaStream_t cuda_stream) {
SqrtGradKernel<<<GET_BLOCKS(count), GET_THREADS, 0, cuda_stream>>>(input, dout, output, count);
return;
}
template <typename T>
void RsqrtGrad(const T *input, const T *dout, T *output, const size_t count, cudaStream_t cuda_stream) {
RsqrtGradKernel<<<GET_BLOCKS(count), GET_THREADS, 0, cuda_stream>>>(input, dout, output, count);
return;
}
template <typename T>
void AsinGrad(const T *input, const T *dout, T *output, const size_t count, cudaStream_t cuda_stream) {
AsinGradKernel<<<GET_BLOCKS(count), GET_THREADS, 0, cuda_stream>>>(input, dout, output, count);
return;
}
template <typename T>
void ACosGrad(const T *input, const T *dout, T *output, const size_t count, cudaStream_t cuda_stream) {
ACosGradKernel<<<GET_BLOCKS(count), GET_THREADS, 0, cuda_stream>>>(input, dout, output, count);
return;
}
template <typename T>
void AtanGrad(const T *input, const T *dout, T *output, const size_t count, cudaStream_t cuda_stream) {
AtanGradKernel<<<GET_BLOCKS(count), GET_THREADS, 0, cuda_stream>>>(input, dout, output, count);
return;
}
template <typename T>
void AsinhGrad(const T *input, const T *dout, T *output, const size_t count, cudaStream_t cuda_stream) {
AsinhGradKernel<<<GET_BLOCKS(count), GET_THREADS, 0, cuda_stream>>>(input, dout, output, count);
return;
}
template <typename T>
void AcoshGrad(const T *input, const T *dout, T *output, const size_t count, cudaStream_t cuda_stream) {
AcoshGradKernel<<<GET_BLOCKS(count), GET_THREADS, 0, cuda_stream>>>(input, dout, output, count);
return;
}
template <typename T>
void ReciprocalGrad(const T *input, const T *dout, T *output, const size_t count, cudaStream_t cuda_stream) {
ReciprocalGradKernel<<<GET_BLOCKS(count), GET_THREADS, 0, cuda_stream>>>(input, dout, output, count);
return;
}
template void SqrtGrad<float>(const float *input, const float *dout, float *output, const size_t count,
cudaStream_t cuda_stream);
template void RsqrtGrad<float>(const float *input, const float *dout, float *output, const size_t count,
cudaStream_t cuda_stream);
template void AsinGrad<float>(const float *input, const float *dout, float *output, const size_t count,
cudaStream_t cuda_stream);
template void ACosGrad<float>(const float *input, const float *dout, float *output, const size_t count,
cudaStream_t cuda_stream);
template void AtanGrad<float>(const float *input, const float *dout, float *output, const size_t count,
cudaStream_t cuda_stream);
template void AsinhGrad<float>(const float *input, const float *dout, float *output, const size_t count,
cudaStream_t cuda_stream);
template void AcoshGrad<float>(const float *input, const float *dout, float *output, const size_t count,
cudaStream_t cuda_stream);
template void ReciprocalGrad<float>(const float *input, const float *dout, float *output, const size_t count,
cudaStream_t cuda_stream);
template void SqrtGrad<half>(const half *input, const half *dout, half *output, const size_t count,
cudaStream_t cuda_stream);
template void RsqrtGrad<half>(const half *input, const half *dout, half *output, const size_t count,
cudaStream_t cuda_stream);
template void AsinGrad<half>(const half *input, const half *dout, half *output, const size_t count,
cudaStream_t cuda_stream);
template void ACosGrad<half>(const half *input, const half *dout, half *output, const size_t count,
cudaStream_t cuda_stream);
template void AtanGrad<half>(const half *input, const half *dout, half *output, const size_t count,
cudaStream_t cuda_stream);
template void AsinhGrad<half>(const half *input, const half *dout, half *output, const size_t count,
cudaStream_t cuda_stream);
template void AcoshGrad<half>(const half *input, const half *dout, half *output, const size_t count,
cudaStream_t cuda_stream);
template void ReciprocalGrad<half>(const half *input, const half *dout, half *output, const size_t count,
cudaStream_t cuda_stream);
|
f5377716a852422349248faad514e5e5f8bebc9a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* http://github.com/dusty-nv/jetson-inference
*/
#include "mathFunctions.h"
#include <iostream>
#include "../util/cuda/cudaUtility.h"
template <typename Dtype>
__global__ void Concat(const int nthreads, const Dtype* in_data,
const bool forward, const int num_concats, const int concat_size,
const int top_concat_axis, const int bottom_concat_axis,
const int offset_concat_axis, Dtype* out_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int total_concat_size = concat_size * bottom_concat_axis;
const int concat_num = index / total_concat_size;
const int concat_index = index % total_concat_size;
const int top_index = concat_index +
(concat_num * top_concat_axis + offset_concat_axis) * concat_size;
if (forward) {
out_data[top_index] = in_data[index];
} else {
out_data[index] = in_data[top_index];
}
}
}
hipError_t ConcatLayer(int nthreads, const float *bottom_data, bool kForward, int num_concats_, int concat_input_size_,
int top_concat_axis, int bottom_concat_axis, int offset_concat_axis, float *top_data, hipStream_t stream)
{
hipLaunchKernelGGL(( Concat<float>), dim3(TENSORRT_GET_BLOCKS(nthreads)), dim3(TENSORRT_CUDA_NUM_THREADS),0,stream, nthreads, bottom_data,
kForward, num_concats_, concat_input_size_, top_concat_axis, bottom_concat_axis, offset_concat_axis, top_data);
return hipPeekAtLastError();
}
// gpuPreImageNet
__global__ void gpuPreImageNet( float2 scale, float4* input, int iWidth, float* output, int oWidth, int oHeight )
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int n = oWidth * oHeight;
if( x >= oWidth || y >= oHeight )
return;
const int dx = ((float)x * scale.x);
const int dy = ((float)y * scale.y);
const float4 px = input[ dy * iWidth + dx ];
const float3 bgr = make_float3(px.z, px.y, px.x);
output[n * 0 + y * oWidth + x] = bgr.x;
output[n * 1 + y * oWidth + x] = bgr.y;
output[n * 2 + y * oWidth + x] = bgr.z;
}
// cudaPreImageNet
hipError_t cudaPreImageNet( float4* input, size_t inputWidth, size_t inputHeight,
float* output, size_t outputWidth, size_t outputHeight )
{
if( !input || !output )
return hipErrorInvalidDevicePointer;
if( inputWidth == 0 || outputWidth == 0 || inputHeight == 0 || outputHeight == 0 )
return hipErrorInvalidValue;
const float2 scale = make_float2( float(inputWidth) / float(outputWidth),
float(inputHeight) / float(outputHeight) );
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(outputWidth,blockDim.x), iDivUp(outputHeight,blockDim.y));
hipLaunchKernelGGL(( gpuPreImageNet), dim3(gridDim), dim3(blockDim), 0, 0, scale, input, inputWidth, output, outputWidth, outputHeight);
return CUDA(hipGetLastError());
}
// gpuPreImageNetMean
__global__ void gpuPreImageNetMean( float2 scale, float3* input, int iWidth, float* output, int oWidth, int oHeight, float3 mean_value )
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int n = oWidth * oHeight;
if( x >= oWidth || y >= oHeight )
return;
const int dx = ((float)x * scale.x);
const int dy = ((float)y * scale.y);
const float3 px = input[ dy * iWidth + dx ];
const float3 bgr = make_float3(px.z - mean_value.x, px.y - mean_value.y, px.x - mean_value.z);
output[n * 0 + y * oWidth + x] = bgr.x;
output[n * 1 + y * oWidth + x] = bgr.y;
output[n * 2 + y * oWidth + x] = bgr.z;
}
// cudaPreImageNetMean
hipError_t cudaPreImageNetMean( float3* input, size_t inputWidth, size_t inputHeight,
float* output, size_t outputWidth, size_t outputHeight, const float3& mean_value )
{
if( !input || !output ){
std::cout << "error here. "<< std::endl;
return hipErrorInvalidDevicePointer;
}
if( inputWidth == 0 || outputWidth == 0 || inputHeight == 0 || outputHeight == 0 ){
std::cout << "Or here. " << std::endl;
return hipErrorInvalidValue;
}
const float2 scale = make_float2( float(inputWidth) / float(outputWidth),
float(inputHeight) / float(outputHeight) );
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(outputWidth,blockDim.x), iDivUp(outputHeight,blockDim.y));
hipLaunchKernelGGL(( gpuPreImageNetMean), dim3(gridDim), dim3(blockDim), 0, 0, scale, input, inputWidth, output, outputWidth, outputHeight, mean_value);
return CUDA(hipGetLastError());
}
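// Crops bbox (bbox_x, bbox_y, bbox_w, bbox_h) out of a planar image and
// resizes it to output_w x output_h by bilinear interpolation: s[] holds the
// four corner weights and map[] the flattened indices of the four neighbours.
// The `mean` argument is currently unused (mean subtraction is commented out).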
__global__ void kernel_extract_roi(float* input, float* output, char* mean,
const int input_w, const int output_w, const int output_h,
const int in_plane_r, const int in_plane_g, const int in_plane_b,
const int out_plane_r, const int out_plane_g, const int out_plane_b,
const int bbox_x, const int bbox_y, const int bbox_w, const int bbox_h)
{
uint x = blockIdx.x * blockDim.x + threadIdx.x;
uint y = blockIdx.y * blockDim.y + threadIdx.y;
if( x < output_w && y < output_h)
{
float r[2] = { float(x) * bbox_w / output_w + bbox_x,
float(y) * bbox_h / output_h + bbox_y };
int pos[4][2] = { { int(floor(r[0])), int(floor(r[1])) },
{ int( ceil(r[0])), int(floor(r[1])) },
{ int(floor(r[0])), int(ceil(r[1])) },
{ int( ceil(r[0])), int(ceil(r[1])) } };
float u = r[0]-floor(r[0]);
float v = r[1]-floor(r[1]);
float s[4] = { (1-u)*(1-v), u*(1-v), (1-u)*v, u*v };
int map[4] = { pos[0][1]*input_w + pos[0][0], pos[1][1]*input_w + pos[1][0],
pos[2][1]*input_w + pos[2][0], pos[3][1]*input_w + pos[3][0]};
int idx = y * output_w + x;
output[idx+out_plane_r] = round( s[0]*input[map[0]+in_plane_r]
+ s[1]*input[map[1]+in_plane_r]
+ s[2]*input[map[2]+in_plane_r]
+ s[3]*input[map[3]+in_plane_r] );// float(mean[idx+out_plane_r]));
output[idx+out_plane_g] = round( s[0]*input[map[0]+in_plane_g]
+ s[1]*input[map[1]+in_plane_g]
+ s[2]*input[map[2]+in_plane_g]
+ s[3]*input[map[3]+in_plane_g] );//float(mean[idx+out_plane_g]));
output[idx+out_plane_b] = round( s[0]*input[map[0]+in_plane_b]
+ s[1]*input[map[1]+in_plane_b]
+ s[2]*input[map[2]+in_plane_b]
+ s[3]*input[map[3]+in_plane_b] );//float(mean[idx+out_plane_b]));
}
}
__global__ void kernelSoftmax( float* x, int channels, float* y)
{
extern __shared__ float mem[];
__shared__ float sum_value;
// Zero the block-wide accumulator before the atomicAdd reduction below.
if (threadIdx.x == 0)
sum_value = 0.0f;
__syncthreads();
float number = *(x + blockDim.x*blockIdx.x + threadIdx.x);
float number_exp = __expf(number);
atomicAdd(&sum_value, number_exp);
__syncthreads();
y[blockDim.x*blockIdx.x + threadIdx.x] = __fdiv_rd(number_exp, sum_value);
}
void cudaSoftmax(int n, int channels, float* x, float*y)
{
hipLaunchKernelGGL(( kernelSoftmax), dim3((n/channels)), dim3(channels), channels*sizeof(float), 0, x, channels, y);
hipDeviceSynchronize();
}
|
f5377716a852422349248faad514e5e5f8bebc9a.cu
|
/*
* http://github.com/dusty-nv/jetson-inference
*/
#include "mathFunctions.h"
#include <iostream>
#include "../util/cuda/cudaUtility.h"
template <typename Dtype>
__global__ void Concat(const int nthreads, const Dtype* in_data,
const bool forward, const int num_concats, const int concat_size,
const int top_concat_axis, const int bottom_concat_axis,
const int offset_concat_axis, Dtype* out_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int total_concat_size = concat_size * bottom_concat_axis;
const int concat_num = index / total_concat_size;
const int concat_index = index % total_concat_size;
const int top_index = concat_index +
(concat_num * top_concat_axis + offset_concat_axis) * concat_size;
if (forward) {
out_data[top_index] = in_data[index];
} else {
out_data[index] = in_data[top_index];
}
}
}
cudaError_t ConcatLayer(int nthreads, const float *bottom_data, bool kForward, int num_concats_, int concat_input_size_,
int top_concat_axis, int bottom_concat_axis, int offset_concat_axis, float *top_data, cudaStream_t stream)
{
Concat<float><<<TENSORRT_GET_BLOCKS(nthreads), TENSORRT_CUDA_NUM_THREADS,0,stream>>>(nthreads, bottom_data,
kForward, num_concats_, concat_input_size_, top_concat_axis, bottom_concat_axis, offset_concat_axis, top_data);
return cudaPeekAtLastError();
}
// gpuPreImageNet
__global__ void gpuPreImageNet( float2 scale, float4* input, int iWidth, float* output, int oWidth, int oHeight )
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int n = oWidth * oHeight;
if( x >= oWidth || y >= oHeight )
return;
const int dx = ((float)x * scale.x);
const int dy = ((float)y * scale.y);
const float4 px = input[ dy * iWidth + dx ];
const float3 bgr = make_float3(px.z, px.y, px.x);
output[n * 0 + y * oWidth + x] = bgr.x;
output[n * 1 + y * oWidth + x] = bgr.y;
output[n * 2 + y * oWidth + x] = bgr.z;
}
// cudaPreImageNet
cudaError_t cudaPreImageNet( float4* input, size_t inputWidth, size_t inputHeight,
float* output, size_t outputWidth, size_t outputHeight )
{
if( !input || !output )
return cudaErrorInvalidDevicePointer;
if( inputWidth == 0 || outputWidth == 0 || inputHeight == 0 || outputHeight == 0 )
return cudaErrorInvalidValue;
const float2 scale = make_float2( float(inputWidth) / float(outputWidth),
float(inputHeight) / float(outputHeight) );
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(outputWidth,blockDim.x), iDivUp(outputHeight,blockDim.y));
gpuPreImageNet<<<gridDim, blockDim>>>(scale, input, inputWidth, output, outputWidth, outputHeight);
return CUDA(cudaGetLastError());
}
// gpuPreImageNetMean
__global__ void gpuPreImageNetMean( float2 scale, float3* input, int iWidth, float* output, int oWidth, int oHeight, float3 mean_value )
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int n = oWidth * oHeight;
if( x >= oWidth || y >= oHeight )
return;
const int dx = ((float)x * scale.x);
const int dy = ((float)y * scale.y);
const float3 px = input[ dy * iWidth + dx ];
const float3 bgr = make_float3(px.z - mean_value.x, px.y - mean_value.y, px.x - mean_value.z);
output[n * 0 + y * oWidth + x] = bgr.x;
output[n * 1 + y * oWidth + x] = bgr.y;
output[n * 2 + y * oWidth + x] = bgr.z;
}
// cudaPreImageNetMean
cudaError_t cudaPreImageNetMean( float3* input, size_t inputWidth, size_t inputHeight,
float* output, size_t outputWidth, size_t outputHeight, const float3& mean_value )
{
if( !input || !output ){
std::cout << "error here. "<< std::endl;
return cudaErrorInvalidDevicePointer;
}
if( inputWidth == 0 || outputWidth == 0 || inputHeight == 0 || outputHeight == 0 ){
std::cout << "Or here. " << std::endl;
return cudaErrorInvalidValue;
}
const float2 scale = make_float2( float(inputWidth) / float(outputWidth),
float(inputHeight) / float(outputHeight) );
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(outputWidth,blockDim.x), iDivUp(outputHeight,blockDim.y));
gpuPreImageNetMean<<<gridDim, blockDim>>>(scale, input, inputWidth, output, outputWidth, outputHeight, mean_value);
return CUDA(cudaGetLastError());
}
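// Crops bbox (bbox_x, bbox_y, bbox_w, bbox_h) out of a planar image and
// resizes it to output_w x output_h by bilinear interpolation: s[] holds the
// four corner weights and map[] the flattened indices of the four neighbours.
// The `mean` argument is currently unused (mean subtraction is commented out).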
__global__ void kernel_extract_roi(float* input, float* output, char* mean,
const int input_w, const int output_w, const int output_h,
const int in_plane_r, const int in_plane_g, const int in_plane_b,
const int out_plane_r, const int out_plane_g, const int out_plane_b,
const int bbox_x, const int bbox_y, const int bbox_w, const int bbox_h)
{
uint x = blockIdx.x * blockDim.x + threadIdx.x;
uint y = blockIdx.y * blockDim.y + threadIdx.y;
if( x < output_w && y < output_h)
{
float r[2] = { float(x) * bbox_w / output_w + bbox_x,
float(y) * bbox_h / output_h + bbox_y };
int pos[4][2] = { { int(floor(r[0])), int(floor(r[1])) },
{ int( ceil(r[0])), int(floor(r[1])) },
{ int(floor(r[0])), int(ceil(r[1])) },
{ int( ceil(r[0])), int(ceil(r[1])) } };
float u = r[0]-floor(r[0]);
float v = r[1]-floor(r[1]);
float s[4] = { (1-u)*(1-v), u*(1-v), (1-u)*v, u*v };
int map[4] = { pos[0][1]*input_w + pos[0][0], pos[1][1]*input_w + pos[1][0],
pos[2][1]*input_w + pos[2][0], pos[3][1]*input_w + pos[3][0]};
int idx = y * output_w + x;
output[idx+out_plane_r] = round( s[0]*input[map[0]+in_plane_r]
+ s[1]*input[map[1]+in_plane_r]
+ s[2]*input[map[2]+in_plane_r]
+ s[3]*input[map[3]+in_plane_r] );// float(mean[idx+out_plane_r]));
output[idx+out_plane_g] = round( s[0]*input[map[0]+in_plane_g]
+ s[1]*input[map[1]+in_plane_g]
+ s[2]*input[map[2]+in_plane_g]
+ s[3]*input[map[3]+in_plane_g] );//float(mean[idx+out_plane_g]));
output[idx+out_plane_b] = round( s[0]*input[map[0]+in_plane_b]
+ s[1]*input[map[1]+in_plane_b]
+ s[2]*input[map[2]+in_plane_b]
+ s[3]*input[map[3]+in_plane_b] );//float(mean[idx+out_plane_b]));
}
}
__global__ void kernelSoftmax( float* x, int channels, float* y)
{
extern __shared__ float mem[];
__shared__ float sum_value;
// Zero the block-wide accumulator before the atomicAdd reduction below.
if (threadIdx.x == 0)
sum_value = 0.0f;
__syncthreads();
float number = *(x + blockDim.x*blockIdx.x + threadIdx.x);
float number_exp = __expf(number);
atomicAdd(&sum_value, number_exp);
__syncthreads();
y[blockDim.x*blockIdx.x + threadIdx.x] = __fdiv_rd(number_exp, sum_value);
}
void cudaSoftmax(int n, int channels, float* x, float*y)
{
kernelSoftmax<<< (n/channels), channels, channels*sizeof(float)>>>( x, channels, y);
cudaDeviceSynchronize();
}
|
3128fbcca020687dd2a51b6b33165f770ab948f3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <math.h>
#define N 1000000
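// Monte Carlo estimate of pi: draw N points uniformly in the unit square;
// the fraction falling inside the quarter circle x^2 + y^2 <= 1 tends to
// pi/4, so pi is approximated by 4 * hits / N.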
__global__ void counts(float *x, float *y, int *results)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < N)
{
float result = x[tid] * x[tid] + y[tid] * y[tid];
if(result <= 1)
results[tid] = 1;
else
results[tid] = 0;
}
}
int main(void)
{
int T = 500; // threads per block
int B = 2000; // blocks per grid
int *dev_results, *host_results;
float *dev_x;
float *dev_y;
//host memory
host_results = (int *) calloc(N, sizeof(int));
//device memory
hipMalloc((void**)&dev_x, N * sizeof(float));
hipMalloc((void**)&dev_y, N * sizeof(float));
hipMalloc((void**)&dev_results, N * sizeof(int));
//random generator
hiprandGenerator_t gen;
hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT);
hiprandSetPseudoRandomGeneratorSeed(gen, 1234ULL);
//generate random numbers on device
hiprandGenerateUniform(gen, dev_x, N);
hiprandGenerateUniform(gen, dev_y, N);
//timers
hipEvent_t start, stop;
float elapsedTime;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
//get results
hipLaunchKernelGGL(( counts), dim3(B),dim3(T), 0, 0, dev_x, dev_y, dev_results);
//stop timers
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
//copy results to host's memory
hipMemcpy(host_results, dev_results, N*sizeof(int), hipMemcpyDeviceToHost);
//sum the results
int counts = 0;
for(int i=0;i<N;i++)
{
if(host_results[i])
counts++;
printf("%d ", host_results[i]);
}
float pi = 4.0 * counts / N;
printf("Pi: %1.10f\n", pi);
printf("Execution Time: %1.10f\n", elapsedTime);
//cleanup
hiprandDestroyGenerator(gen);
hipFree(dev_x);
hipFree(dev_y);
hipFree(dev_results);
free(host_results);
return 0;
}
|
3128fbcca020687dd2a51b6b33165f770ab948f3.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <curand.h>
#include <math.h>
#define N 1000000
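// Monte Carlo estimate of pi: draw N points uniformly in the unit square;
// the fraction falling inside the quarter circle x^2 + y^2 <= 1 tends to
// pi/4, so pi is approximated by 4 * hits / N.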
__global__ void counts(float *x, float *y, int *results)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < N)
{
float result = x[tid] * x[tid] + y[tid] * y[tid];
if(result <= 1)
results[tid] = 1;
else
results[tid] = 0;
}
}
int main(void)
{
int T = 500; // threads per block
int B = 2000; // blocks per grid
int *dev_results, *host_results;
float *dev_x;
float *dev_y;
//host memory
host_results = (int *) calloc(N, sizeof(int));
//device memory
cudaMalloc((void**)&dev_x, N * sizeof(float));
cudaMalloc((void**)&dev_y, N * sizeof(float));
cudaMalloc((void**)&dev_results, N * sizeof(int));
//random generator
curandGenerator_t gen;
curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
curandSetPseudoRandomGeneratorSeed(gen, 1234ULL);
//generate random numbers on device
curandGenerateUniform(gen, dev_x, N);
curandGenerateUniform(gen, dev_y, N);
//timers
cudaEvent_t start, stop;
float elapsedTime;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
//get results
counts<<<B,T>>>(dev_x, dev_y, dev_results);
//stop timers
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
//copy results to host's memory
cudaMemcpy(host_results, dev_results, N*sizeof(int), cudaMemcpyDeviceToHost);
//sum the results
int counts = 0;
for(int i=0;i<N;i++)
{
if(host_results[i])
counts++;
printf("%d ", host_results[i]);
}
float pi = 4.0 * counts / N;
printf("Pi: %1.10f\n", pi);
printf("Execution Time: %1.10f\n", elapsedTime);
//cleanup
curandDestroyGenerator(gen);
cudaFree(dev_x);
cudaFree(dev_y);
cudaFree(dev_results);
free(host_results);
return 0;
}
|
5b5e29806d223af735b53ee044fc84c7a9ee1a3f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHUNN.h"
#include "common.h"
#include "THHHalf.h"
#include "THHHalfAutoNumerics.cuh"
#include <stdio.h>
#include <assert.h>
static const int NTHREADS = 32;
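// Negative log-likelihood loss: `input` is expected to hold log-probabilities,
// so the per-sample loss for target class t is -w_t * input[t]. kernel1
// handles a single sample; the NTHREADS variant reduces a whole minibatch in
// shared memory. A target of -1 (after TH_INDEX_BASE) marks an ignored sample.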
template <typename Dtype>
__global__ void cunn_ClassNLLCriterion_updateOutput_kernel1(Dtype *output,
Dtype *total_weight,
Dtype *input,
THCIndex_t *target,
Dtype *weights,
int size_average,
int n_classes) {
assert(threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0);
// TODO: T4951791 Reuse code between updateOutput_kernel1 and
// updateOutput_kernel.
int t = (int)*target - TH_INDEX_BASE;
assert(t >= -1 && t < n_classes);
if (t >= 0) {
Dtype cur_weight = weights ? weights[t] : ScalarConvert<int, Dtype>::to(1);
*output = -cur_weight * input[t];
*total_weight = cur_weight;
if (size_average && *total_weight > 0) {
*output /= *total_weight;
}
}
}
template <typename Dtype, typename Acctype>
__global__ void cunn_ClassNLLCriterion_updateOutput_kernel(Dtype *output,
Dtype *total_weight,
Dtype *input,
THCIndex_t *target,
Dtype *weights,
int size_average,
int nframe,
int ndim,
int n_classes) {
__shared__ Acctype shInputs[NTHREADS], acc_weight[NTHREADS];
int i, t;
Dtype cur_weight;
shInputs[threadIdx.x] = ScalarConvert<int, Acctype>::to(0);
acc_weight[threadIdx.x] = ScalarConvert<int, Acctype>::to(0);
for (i = threadIdx.x; i < nframe; i += NTHREADS) {
t = target[i] - TH_INDEX_BASE;
assert(t >= -1 && t < n_classes);
if (t >= 0) {
cur_weight = weights ? weights[t] : ScalarConvert<int, Dtype>::to(1);
shInputs[threadIdx.x] -= input[i * ndim + t] * cur_weight;
acc_weight[threadIdx.x] += cur_weight;
}
}
__syncthreads();
// TODO: T4951791 Reuse code between updateOutput_kernel1 and
// updateOutput_kernel
if (threadIdx.x == 0) {
*output = *total_weight = ScalarConvert<int, Dtype>::to(0);
Acctype outputAcc = 0;
Acctype total_weightAcc = 0;
for (i = 0; i < NTHREADS; ++i){
// FIXME should we do something here
outputAcc += shInputs[i];
total_weightAcc += acc_weight[i];
}
*total_weight = ScalarConvert<Acctype, Dtype>::to(total_weightAcc);
*output = ScalarConvert<Acctype, Dtype>::to(outputAcc);
if (size_average && *total_weight > 0) {
*output = ScalarConvert<Acctype, Dtype>::to(outputAcc / total_weightAcc);
}
}
}
template <typename Dtype>
__global__ void cunn_ClassNLLCriterion_updateGradInput_kernel1(
Dtype* gradInput,
Dtype* weights,
THCIndex_t* target,
Dtype* total_weight,
int size_average,
int n_classes)
{
if (*total_weight <= 0) {
return;
}
Dtype norm = size_average ? (ScalarConvert<int, Dtype>::to(1) / *total_weight) : ScalarConvert<int, Dtype>::to(1);
int t = (int)*target - TH_INDEX_BASE;
assert(t >= -1 && t < n_classes);
if (t >= 0) {
gradInput[t] = -(weights ? weights[t] : ScalarConvert<int, Dtype>::to(1)) * norm;
}
}
template <typename Dtype>
__global__ void cunn_ClassNLLCriterion_updateGradInput_kernel(
Dtype *gradInput,
THCIndex_t *target,
Dtype *weights,
Dtype *total_weight,
int size_average,
int nframe,
int ndim,
int n_classes)
{
if (*total_weight <= 0) {
return;
}
int i, t;
Dtype norm = size_average ? (ScalarConvert<int, Dtype>::to(1) / *total_weight) : ScalarConvert<int, Dtype>::to(1);
for (i = threadIdx.x; i < nframe; i += NTHREADS) {
t = (int)target[i] - TH_INDEX_BASE;
assert(t >= -1 && t < n_classes);
if (t >= 0) {
gradInput[i * ndim + t] = -(weights ? weights[t] : ScalarConvert<int, Dtype>::to(1)) * norm;
}
}
}
#include "generic/ClassNLLCriterion.cu"
#include "THHGenerateFloatTypes.h"
|
5b5e29806d223af735b53ee044fc84c7a9ee1a3f.cu
|
#include "THCUNN.h"
#include "common.h"
#include "THCHalf.h"
#include "THCHalfAutoNumerics.cuh"
#include <stdio.h>
#include <assert.h>
static const int NTHREADS = 32;
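// Negative log-likelihood loss: `input` is expected to hold log-probabilities,
// so the per-sample loss for target class t is -w_t * input[t]. kernel1
// handles a single sample; the NTHREADS variant reduces a whole minibatch in
// shared memory. A target of -1 (after TH_INDEX_BASE) marks an ignored sample.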
template <typename Dtype>
__global__ void cunn_ClassNLLCriterion_updateOutput_kernel1(Dtype *output,
Dtype *total_weight,
Dtype *input,
THCIndex_t *target,
Dtype *weights,
int size_average,
int n_classes) {
assert(threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0);
// TODO: T4951791 Reuse code between updateOutput_kernel1 and
// updateOutput_kernel.
int t = (int)*target - TH_INDEX_BASE;
assert(t >= -1 && t < n_classes);
if (t >= 0) {
Dtype cur_weight = weights ? weights[t] : ScalarConvert<int, Dtype>::to(1);
*output = -cur_weight * input[t];
*total_weight = cur_weight;
if (size_average && *total_weight > 0) {
*output /= *total_weight;
}
}
}
template <typename Dtype, typename Acctype>
__global__ void cunn_ClassNLLCriterion_updateOutput_kernel(Dtype *output,
Dtype *total_weight,
Dtype *input,
THCIndex_t *target,
Dtype *weights,
int size_average,
int nframe,
int ndim,
int n_classes) {
__shared__ Acctype shInputs[NTHREADS], acc_weight[NTHREADS];
int i, t;
Dtype cur_weight;
shInputs[threadIdx.x] = ScalarConvert<int, Acctype>::to(0);
acc_weight[threadIdx.x] = ScalarConvert<int, Acctype>::to(0);
for (i = threadIdx.x; i < nframe; i += NTHREADS) {
t = target[i] - TH_INDEX_BASE;
assert(t >= -1 && t < n_classes);
if (t >= 0) {
cur_weight = weights ? weights[t] : ScalarConvert<int, Dtype>::to(1);
shInputs[threadIdx.x] -= input[i * ndim + t] * cur_weight;
acc_weight[threadIdx.x] += cur_weight;
}
}
__syncthreads();
// TODO: T4951791 Reuse code between updateOutput_kernel1 and
// updateOutput_kernel
if (threadIdx.x == 0) {
*output = *total_weight = ScalarConvert<int, Dtype>::to(0);
Acctype outputAcc = 0;
Acctype total_weightAcc = 0;
for (i = 0; i < NTHREADS; ++i){
// FIXME should we do something here
outputAcc += shInputs[i];
total_weightAcc += acc_weight[i];
}
*total_weight = ScalarConvert<Acctype, Dtype>::to(total_weightAcc);
*output = ScalarConvert<Acctype, Dtype>::to(outputAcc);
if (size_average && *total_weight > 0) {
*output = ScalarConvert<Acctype, Dtype>::to(outputAcc / total_weightAcc);
}
}
}
template <typename Dtype>
__global__ void cunn_ClassNLLCriterion_updateGradInput_kernel1(
Dtype* gradInput,
Dtype* weights,
THCIndex_t* target,
Dtype* total_weight,
int size_average,
int n_classes)
{
if (*total_weight <= 0) {
return;
}
Dtype norm = size_average ? (ScalarConvert<int, Dtype>::to(1) / *total_weight) : ScalarConvert<int, Dtype>::to(1);
int t = (int)*target - TH_INDEX_BASE;
assert(t >= -1 && t < n_classes);
if (t >= 0) {
gradInput[t] = -(weights ? weights[t] : ScalarConvert<int, Dtype>::to(1)) * norm;
}
}
template <typename Dtype>
__global__ void cunn_ClassNLLCriterion_updateGradInput_kernel(
Dtype *gradInput,
THCIndex_t *target,
Dtype *weights,
Dtype *total_weight,
int size_average,
int nframe,
int ndim,
int n_classes)
{
if (*total_weight <= 0) {
return;
}
int i, t;
Dtype norm = size_average ? (ScalarConvert<int, Dtype>::to(1) / *total_weight) : ScalarConvert<int, Dtype>::to(1);
for (i = threadIdx.x; i < nframe; i += NTHREADS) {
t = (int)target[i] - TH_INDEX_BASE;
assert(t >= -1 && t < n_classes);
if (t >= 0) {
gradInput[i * ndim + t] = -(weights ? weights[t] : ScalarConvert<int, Dtype>::to(1)) * norm;
}
}
}
#include "generic/ClassNLLCriterion.cu"
#include "THCGenerateFloatTypes.h"
|
192e5189d1b2e2ad905332661601cd3fe7028562.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _VECTOR_ADDITION_KERNEL_H_
#define _VECTOR_ADDITION_KERNEL_H_
__global__ void vector_addition_kernel(float *A, float *B, float *C, int num_elements)
{
int thread_id = threadIdx.x; /* Obtain index of thread within thread block */
if (thread_id >= num_elements)
return;
C[thread_id] = A[thread_id] + B[thread_id];
return;
}
#endif /* #ifndef _VECTOR_ADDITION_KERNEL_H */
|
192e5189d1b2e2ad905332661601cd3fe7028562.cu
|
#ifndef _VECTOR_ADDITION_KERNEL_H_
#define _VECTOR_ADDITION_KERNEL_H_
__global__ void vector_addition_kernel(float *A, float *B, float *C, int num_elements)
{
int thread_id = threadIdx.x; /* Obtain index of thread within thread block */
if (thread_id >= num_elements)
return;
C[thread_id] = A[thread_id] + B[thread_id];
return;
}
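/* Editorial sketch (hypothetical helper, not part of the original header):
   the kernel indexes with threadIdx.x only, so it must be launched as a
   single block with at least num_elements threads. */
inline void launch_vector_addition(float *A, float *B, float *C, int num_elements)
{
    vector_addition_kernel<<<1, num_elements>>>(A, B, C, num_elements);
}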
#endif /* #ifndef _VECTOR_ADDITION_KERNEL_H_ */
|
b863a63fc67cbccdc5bbcac17d5818398b70ddd4.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
 * --------------------------------------------------------
 * Universidad del Valle de Guatemala
 * CC3056 - Microprocessor Programming
 * --------------------------------------------------------
 * Proyecto3.cu
 * --------------------------------------------------------
 * Implementation of reduction operations in CUDA
 * and computation of the mean of a data set.
 * --------------------------------------------------------
 * Authors:
 * - Diego Cordova, 20212
 * - Alejandro Gomez, 20347
 * - Paola de Leon, 20361
 *
 * Last modified: 2021/11/23
 * --------------------------------------------------------
 */
// Library includes
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
// Block size definition
#define BLOCKSIZE 300
// GLOBAL: function called from the host and executed on the device (kernel)
__global__ void suma_total(int *a, int *b, int *c, int *d)
{
int myID = threadIdx.x + blockDim.x * blockIdx.x;
if(myID < *d)
c[myID] = a[myID] + b[myID];
}
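// Editorial note: the per-pass block count computed in main() below is a
// ceiling division; a one-line equivalent (hypothetical helper, not used by
// the original code) would be:
static inline int ceil_div(int n, int block) { return (n + block - 1) / block; }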
int main(void) {
int SIZE_1 = 25000;
int SIZE_2 = 15000;
float init_1 = (float) SIZE_1;
float init_2 = (float) SIZE_2;
//---------------- Stream initialization ----------------
hipStream_t stream1, stream2;
hipStreamCreate(&stream1);
hipStreamCreate(&stream2);
//---------------- Memory initialization ----------------
// host memory for the streams
int *a1, *b1, *c1, *d1;
int *a2, *b2, *c2, *d2;
// device memory for the streams
int *dev_a1, *dev_b1, *dev_c1, *dev_d1;
int *dev_a2, *dev_b2, *dev_c2, *dev_d2;
// --------------- Data initialization ---------------
hipHostMalloc((void**)&c1, SIZE_1 * sizeof(int), hipHostMallocDefault);
hipHostMalloc((void**)&c2, SIZE_2 * sizeof(int), hipHostMallocDefault);
printf("\nGenerating %d random numbers for array 1...\n", SIZE_1);
printf("Generating %d random numbers for array 2...\n", SIZE_2);
int i = 0;
for(i = 0; i < SIZE_1; i++)
{
// Generate random numbers between 100 and 1199
c1[i] = rand() % 1100 + 100;
}
for(i = 0; i < SIZE_2; i++)
{
// Generate random numbers between 0 and 3499
c2[i] = rand() % 3500;
}
printf("\nComputing the sum of the two arrays...\n");
while ((SIZE_1 != 1) || (SIZE_2 != 1))
{
if (SIZE_1 != 1)
{
//---------------- Compute the size of the new array ----------------
int limit = 0;
if (SIZE_1 % 2 == 0)
{
limit = SIZE_1 / 2;
}
else
{
limit = (SIZE_1 + 1) / 2;
}
//---------------- Memory Allocation ----------------
// Host
hipHostMalloc((void**)&a1,limit*sizeof(int), hipHostMallocDefault);
hipHostMalloc((void**)&b1,limit*sizeof(int), hipHostMallocDefault);
hipHostMalloc((void**)&d1,sizeof(int), hipHostMallocDefault);
// Device
hipMalloc((void**)&dev_a1, limit * sizeof(int));
hipMalloc((void**)&dev_b1, limit * sizeof(int));
hipMalloc((void**)&dev_c1, limit * sizeof(int));
hipMalloc((void**)&dev_d1, sizeof(int));
//---------------- Data reordering ----------------
for(i = 0; i < (2 * limit); i++)
{
if (i < limit)
{
a1[i] = c1[i];
}
else
{
if (i < SIZE_1)
{
b1[i - limit] = c1[i];
}
else
{
b1[i - limit] = 0;
}
}
}
// Partially free c1 and re-allocate pinned memory
hipHostFree(c1); // fixed: pinned memory from hipHostMalloc must be released with hipHostFree
hipHostMalloc((void**)&c1,limit*sizeof(int), hipHostMallocDefault);
SIZE_1 = limit; // Assign the new array size
*d1 = limit; // Copy the array size into d1
// Compute the number of blocks needed for this array size
int bloques = SIZE_1 / BLOCKSIZE;
if(SIZE_1 % BLOCKSIZE != 0)
{
bloques = bloques + 1;
}
int hilos = BLOCKSIZE;
// --------------------- Kernel ---------------------
// Copy parameters to the device
hipMemcpyAsync(dev_a1, a1, SIZE_1*sizeof(int), hipMemcpyHostToDevice, stream1);
hipMemcpyAsync(dev_b1, b1, SIZE_1*sizeof(int), hipMemcpyHostToDevice, stream1);
hipMemcpyAsync(dev_d1, d1, sizeof(int), hipMemcpyHostToDevice, stream1);
// Kernel launch
hipLaunchKernelGGL(( suma_total) , dim3(bloques), dim3(hilos), 0, stream1 , dev_a1, dev_b1, dev_c1, dev_d1);
// Copy the result back to the host
hipMemcpyAsync(c1, dev_c1, SIZE_1*sizeof(int), hipMemcpyDeviceToHost, stream1);
hipStreamSynchronize(stream1); // wait for stream1 to finish
// --------------- Memory deallocation ---------------
hipHostFree(a1); // fixed: pinned host buffers use hipHostFree
hipHostFree(b1);
hipFree(dev_a1);
hipFree(dev_b1);
hipFree(dev_c1);
hipFree(dev_d1);
}
if (SIZE_2 != 1)
{
//---------------- Compute the size of the new array ----------------
int limit = 0;
if (SIZE_2 % 2 == 0)
{
limit = SIZE_2 / 2;
}
else
{
limit = (SIZE_2 + 1) / 2;
}
//---------------- Memory Allocation ----------------
// Host
hipHostMalloc((void**)&a2, limit * sizeof(int), hipHostMallocDefault);
hipHostMalloc((void**)&b2, limit * sizeof(int), hipHostMallocDefault);
hipHostMalloc((void**)&d2, sizeof(int), hipHostMallocDefault);
// Device
hipMalloc((void**)&dev_a2, limit * sizeof(int));
hipMalloc((void**)&dev_b2, limit * sizeof(int));
hipMalloc((void**)&dev_c2, limit * sizeof(int));
hipMalloc((void**)&dev_d2, sizeof(int));
//---------------- Data reordering ----------------
for(i = 0; i < (2 * limit); i++)
{
if (i < limit)
{
a2[i] = c2[i];
}
else
{
if (i < SIZE_2)
{
b2[i - limit] = c2[i];
}
else
{
b2[i - limit] = 0;
}
}
}
// Partially free c2 and re-allocate pinned memory
hipHostFree(c2); // fixed: pinned memory from hipHostMalloc must be released with hipHostFree
hipHostMalloc((void**)&c2, limit * sizeof(int), hipHostMallocDefault);
SIZE_2 = limit; // Assign the new array size
*d2 = limit; // Copy the array size into d2
// Compute the number of blocks needed for this array size
int bloques = SIZE_2 / BLOCKSIZE;
if(SIZE_2 % BLOCKSIZE != 0)
{
bloques = bloques + 1;
}
int hilos = BLOCKSIZE;
// --------------------- Kernel ---------------------
// Copy parameters to the device
hipMemcpyAsync(dev_a2, a2, SIZE_2 * sizeof(int), hipMemcpyHostToDevice, stream2);
hipMemcpyAsync(dev_b2, b2, SIZE_2 * sizeof(int), hipMemcpyHostToDevice, stream2);
hipMemcpyAsync(dev_d2, d2, sizeof(int), hipMemcpyHostToDevice, stream2);
// Kernel launch
hipLaunchKernelGGL(( suma_total) , dim3(bloques), dim3(hilos), 0, stream2 , dev_a2, dev_b2, dev_c2, dev_d2);
// Copy the result back to the host
hipMemcpyAsync(c2, dev_c2, SIZE_2 * sizeof(int), hipMemcpyDeviceToHost, stream2);
hipStreamSynchronize(stream2); // wait for stream2 to finish
// --------------- Memory deallocation ---------------
hipHostFree(a2); // fixed: pinned host buffers use hipHostFree
hipHostFree(b2);
hipFree(dev_a2);
hipFree(dev_b2);
hipFree(dev_c2);
hipFree(dev_d2);
}
}
// --------------- Printing of results ---------------
float suma = (float) c1[0];
float media = suma / init_1;
printf("\n--------- Array 1 ---------");
printf("\n-> The total sum of the data is: %d", c1[0]);
printf("\n-> The mean of array 1 is: %lf\n\n", media);
suma = (float) c2[0];
media = suma / init_2;
printf("\n--------- Array 2 ---------");
printf("\n-> The total sum of the data is: %d", c2[0]);
printf("\n-> The mean of array 2 is: %lf\n\n", media);
// --------------- Final memory deallocation ---------------
hipHostFree(c1); // fixed: pinned host buffers use hipHostFree
hipHostFree(d1);
hipHostFree(c2);
hipHostFree(d2);
hipStreamDestroy(stream2);
hipStreamDestroy(stream1);
return 0;
}
|
b863a63fc67cbccdc5bbcac17d5818398b70ddd4.cu
|
/**
 * --------------------------------------------------------
 * Universidad del Valle de Guatemala
 * CC3056 - Microprocessor Programming
 * --------------------------------------------------------
 * Proyecto3.cu
 * --------------------------------------------------------
 * Implementation of reduction operations in CUDA
 * and computation of the mean of a data set.
 * --------------------------------------------------------
 * Authors:
 * - Diego Cordova, 20212
 * - Alejandro Gomez, 20347
 * - Paola de Leon, 20361
 *
 * Last modified: 2021/11/23
 * --------------------------------------------------------
 */
// Library includes
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
// Block size definition
#define BLOCKSIZE 300
// GLOBAL: function called from the host and executed on the device (kernel)
__global__ void suma_total(int *a, int *b, int *c, int *d)
{
int myID = threadIdx.x + blockDim.x * blockIdx.x;
if(myID < *d)
c[myID] = a[myID] + b[myID];
}
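// Editorial note: the per-pass block count computed in main() below is a
// ceiling division; a one-line equivalent (hypothetical helper, not used by
// the original code) would be:
static inline int ceil_div(int n, int block) { return (n + block - 1) / block; }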
int main(void) {
int SIZE_1 = 25000;
int SIZE_2 = 15000;
float init_1 = (float) SIZE_1;
float init_2 = (float) SIZE_2;
//---------------- Stream initialization ----------------
cudaStream_t stream1, stream2;
cudaStreamCreate(&stream1);
cudaStreamCreate(&stream2);
//---------------- Memory initialization ----------------
// host memory for the streams
int *a1, *b1, *c1, *d1;
int *a2, *b2, *c2, *d2;
// device memory for the streams
int *dev_a1, *dev_b1, *dev_c1, *dev_d1;
int *dev_a2, *dev_b2, *dev_c2, *dev_d2;
// --------------- Data initialization ---------------
cudaHostAlloc((void**)&c1, SIZE_1 * sizeof(int), cudaHostAllocDefault);
cudaHostAlloc((void**)&c2, SIZE_2 * sizeof(int), cudaHostAllocDefault);
printf("\nGenerating %d random numbers for array 1...\n", SIZE_1);
printf("Generating %d random numbers for array 2...\n", SIZE_2);
int i = 0;
for(i = 0; i < SIZE_1; i++)
{
// Generate random numbers between 100 and 1199
c1[i] = rand() % 1100 + 100;
}
for(i = 0; i < SIZE_2; i++)
{
// Generate random numbers between 0 and 3499
c2[i] = rand() % 3500;
}
printf("\nComputing the sum of the two arrays...\n");
while ((SIZE_1 != 1) || (SIZE_2 != 1))
{
if (SIZE_1 != 1)
{
//---------------- Compute the size of the new array ----------------
int limit = 0;
if (SIZE_1 % 2 == 0)
{
limit = SIZE_1 / 2;
}
else
{
limit = (SIZE_1 + 1) / 2;
}
//---------------- Memory Allocation ----------------
// Host
cudaHostAlloc((void**)&a1,limit*sizeof(int), cudaHostAllocDefault);
cudaHostAlloc((void**)&b1,limit*sizeof(int), cudaHostAllocDefault);
cudaHostAlloc((void**)&d1,sizeof(int), cudaHostAllocDefault);
// Device
cudaMalloc((void**)&dev_a1, limit * sizeof(int));
cudaMalloc((void**)&dev_b1, limit * sizeof(int));
cudaMalloc((void**)&dev_c1, limit * sizeof(int));
cudaMalloc((void**)&dev_d1, sizeof(int));
//---------------- Data reordering ----------------
for(i = 0; i < (2 * limit); i++)
{
if (i < limit)
{
a1[i] = c1[i];
}
else
{
if (i < SIZE_1)
{
b1[i - limit] = c1[i];
}
else
{
b1[i - limit] = 0;
}
}
}
// Partially free c1 and re-allocate pinned memory
cudaFreeHost(c1); // fixed: pinned memory from cudaHostAlloc must be released with cudaFreeHost
cudaHostAlloc((void**)&c1,limit*sizeof(int), cudaHostAllocDefault);
SIZE_1 = limit; // Assign the new array size
*d1 = limit; // Copy the array size into d1
// Compute the number of blocks needed for this array size
int bloques = SIZE_1 / BLOCKSIZE;
if(SIZE_1 % BLOCKSIZE != 0)
{
bloques = bloques + 1;
}
int hilos = BLOCKSIZE;
// --------------------- Kernel ---------------------
// Copy parameters to the device
cudaMemcpyAsync(dev_a1, a1, SIZE_1*sizeof(int), cudaMemcpyHostToDevice, stream1);
cudaMemcpyAsync(dev_b1, b1, SIZE_1*sizeof(int), cudaMemcpyHostToDevice, stream1);
cudaMemcpyAsync(dev_d1, d1, sizeof(int), cudaMemcpyHostToDevice, stream1);
// Kernel launch
suma_total <<< bloques, hilos, 0, stream1 >>> (dev_a1, dev_b1, dev_c1, dev_d1);
// Copy the result back to the host
cudaMemcpyAsync(c1, dev_c1, SIZE_1*sizeof(int), cudaMemcpyDeviceToHost, stream1);
cudaStreamSynchronize(stream1); // wait for stream1 to finish
// --------------- Memory deallocation ---------------
cudaFreeHost(a1); // fixed: pinned host buffers use cudaFreeHost
cudaFreeHost(b1);
cudaFree(dev_a1);
cudaFree(dev_b1);
cudaFree(dev_c1);
cudaFree(dev_d1);
}
if (SIZE_2 != 1)
{
//---------------- Compute the size of the new array ----------------
int limit = 0;
if (SIZE_2 % 2 == 0)
{
limit = SIZE_2 / 2;
}
else
{
limit = (SIZE_2 + 1) / 2;
}
//---------------- Memory Allocation ----------------
// Host
cudaHostAlloc((void**)&a2, limit * sizeof(int), cudaHostAllocDefault);
cudaHostAlloc((void**)&b2, limit * sizeof(int), cudaHostAllocDefault);
cudaHostAlloc((void**)&d2, sizeof(int), cudaHostAllocDefault);
// Device
cudaMalloc((void**)&dev_a2, limit * sizeof(int));
cudaMalloc((void**)&dev_b2, limit * sizeof(int));
cudaMalloc((void**)&dev_c2, limit * sizeof(int));
cudaMalloc((void**)&dev_d2, sizeof(int));
//---------------- Data reordering ----------------
for(i = 0; i < (2 * limit); i++)
{
if (i < limit)
{
a2[i] = c2[i];
}
else
{
if (i < SIZE_2)
{
b2[i - limit] = c2[i];
}
else
{
b2[i - limit] = 0;
}
}
}
// Partially free c2 and re-allocate pinned memory
cudaFreeHost(c2); // fixed: pinned memory from cudaHostAlloc must be released with cudaFreeHost
cudaHostAlloc((void**)&c2, limit * sizeof(int), cudaHostAllocDefault);
SIZE_2 = limit; // Assign the new array size
*d2 = limit; // Copy the array size into d2
// Compute the number of blocks needed for this array size
int bloques = SIZE_2 / BLOCKSIZE;
if(SIZE_2 % BLOCKSIZE != 0)
{
bloques = bloques + 1;
}
int hilos = BLOCKSIZE;
// --------------------- Kernel ---------------------
// Copy parameters to the device
cudaMemcpyAsync(dev_a2, a2, SIZE_2 * sizeof(int), cudaMemcpyHostToDevice, stream2);
cudaMemcpyAsync(dev_b2, b2, SIZE_2 * sizeof(int), cudaMemcpyHostToDevice, stream2);
cudaMemcpyAsync(dev_d2, d2, sizeof(int), cudaMemcpyHostToDevice, stream2);
// Kernel launch
suma_total <<< bloques, hilos, 0, stream2 >>> (dev_a2, dev_b2, dev_c2, dev_d2);
// Copy the result back to the host
cudaMemcpyAsync(c2, dev_c2, SIZE_2 * sizeof(int), cudaMemcpyDeviceToHost, stream2);
cudaStreamSynchronize(stream2); // wait for stream2 to finish
// --------------- Memory deallocation ---------------
cudaFreeHost(a2); // fixed: pinned host buffers use cudaFreeHost
cudaFreeHost(b2);
cudaFree(dev_a2);
cudaFree(dev_b2);
cudaFree(dev_c2);
cudaFree(dev_d2);
}
}
// --------------- Printing of results ---------------
float suma = (float) c1[0];
float media = suma / init_1;
printf("\n--------- Array 1 ---------");
printf("\n-> The total sum of the data is: %d", c1[0]);
printf("\n-> The mean of array 1 is: %lf\n\n", media);
suma = (float) c2[0];
media = suma / init_2;
printf("\n--------- Array 2 ---------");
printf("\n-> The total sum of the data is: %d", c2[0]);
printf("\n-> The mean of array 2 is: %lf\n\n", media);
// --------------- Final memory deallocation ---------------
cudaFreeHost(c1); // fixed: pinned host buffers use cudaFreeHost
cudaFreeHost(d1);
cudaFreeHost(c2);
cudaFreeHost(d2);
cudaStreamDestroy(stream2);
cudaStreamDestroy(stream1);
return 0;
}
|
264cebe189c3ae32e6a03e75ea2e7faa1ef29a3f.hip
|
// !!! This is a file automatically generated by hipify!!!
// Includes
#include <stdio.h>
#include <stdlib.h>
// includes CUDA
#include <hip/hip_runtime.h>
// includes, project
#include "../include/REPEATR.h"
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 640
#define max_tid THREADS_PER_BLOCK*NUM_OF_BLOCKS
#define LINE_SIZE 12
// Variables
int* h_A;
int* h_B;
int* h_C;
int* d_A;
int* d_B;
int* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(int*, int);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line ){
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
__global__ void PowerKernal(int* A, int* C, int iterations){
int tid = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
int sum=0;
// Fill the L1 cache, Miss on every iteration
for (int i=0; i<iterations ; i++){
//REPLACE_ITERATIONS
REPEAT_L6(0);
}
C[0]=sum;
__syncthreads();
}
// Host code
int main(int argc, char** argv)
{
int iterations;
if (argc != 2){
fprintf(stderr,"usage: %s #iterations\n",argv[0]);
exit(1);
}
else{
iterations = atoi(argv[1]);
}
printf("Power Microbenchmark with %d iterations\n",iterations);
int N = (400*max_tid*LINE_SIZE);
size_t size = N * sizeof(int) ;
// Allocate input vectors h_A and h_B in host memory
h_A = (int*)malloc(size);
if (h_A == 0) CleanupResources();
// h_B = (int*)malloc(size);
// if (h_B == 0) CleanupResources();
h_C = (int*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
// RandomInit(h_B, N);
// Allocate vectors in device memory
checkCudaErrors( hipMalloc((void**)&d_A, size) );
// checkCudaErrors( hipMalloc((void**)&d_B, size) );
checkCudaErrors( hipMalloc((void**)&d_C, size) );
hipEvent_t start, stop;
float elapsedTime = 0;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
// Copy vectors from host memory to device memory
checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
// checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) );
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
checkCudaErrors(hipEventRecord(start));
hipLaunchKernelGGL(( PowerKernal), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_C, iterations);
checkCudaErrors(hipEventRecord(stop));
checkCudaErrors(hipEventSynchronize(stop));
checkCudaErrors(hipEventElapsedTime(&elapsedTime, start, stop));
printf("gpu execution time = %.2f s\n", elapsedTime/1000);
getLastCudaError("kernel launch failure");
hipDeviceSynchronize();
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) );
checkCudaErrors(hipEventDestroy(start));
checkCudaErrors(hipEventDestroy(stop));
CleanupResources();
return 0;
}
void CleanupResources(void){
// Free device memory
if (d_A)
hipFree(d_A);
//if (d_B)
// hipFree(d_B);
if (d_C)
hipFree(d_C);
// Free host memory
if (h_A)
free(h_A);
// if (h_B)
// free(h_B);
if (h_C)
free(h_C);
}
// Allocates an array with random integer entries.
void RandomInit(int* data, int n){
for (int i = 0; i < n; ++i)
data[i] = rand() % 100; // fixed: integer division rand() / RAND_MAX almost always yields 0
}
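// Editorial sketch (hypothetical helper, not part of the original benchmark):
// a float initializer must divide in floating point before truncating,
// otherwise the integer quotient is almost always 0:
void RandomInitFloat(float* data, int n){
for (int i = 0; i < n; ++i)
data[i] = (float)rand() / (float)RAND_MAX; // uniform in [0, 1]
}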
|
264cebe189c3ae32e6a03e75ea2e7faa1ef29a3f.cu
|
// Includes
#include <stdio.h>
#include <stdlib.h>
// includes CUDA
#include <cuda_runtime.h>
// includes, project
#include "../include/REPEATR.h"
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 640
#define max_tid THREADS_PER_BLOCK*NUM_OF_BLOCKS
#define LINE_SIZE 12
// Variables
int* h_A;
int* h_B;
int* h_C;
int* d_A;
int* d_B;
int* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(int*, int);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line ){
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
__global__ void PowerKernal(int* A, int* C, int iterations){
int tid = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
int sum=0;
// Fill the L1 cache, Miss on every iteration
for (int i=0; i<iterations ; i++){
//REPLACE_ITERATIONS
REPEAT_L6(0);
}
C[0]=sum;
__syncthreads();
}
// Host code
int main(int argc, char** argv)
{
int iterations;
if (argc != 2){
fprintf(stderr,"usage: %s #iterations\n",argv[0]);
exit(1);
}
else{
iterations = atoi(argv[1]);
}
printf("Power Microbenchmark with %d iterations\n",iterations);
int N = (400*max_tid*LINE_SIZE);
size_t size = N * sizeof(int) ;
// Allocate input vectors h_A and h_B in host memory
h_A = (int*)malloc(size);
if (h_A == 0) CleanupResources();
// h_B = (int*)malloc(size);
// if (h_B == 0) CleanupResources();
h_C = (int*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
// RandomInit(h_B, N);
// Allocate vectors in device memory
checkCudaErrors( cudaMalloc((void**)&d_A, size) );
// checkCudaErrors( cudaMalloc((void**)&d_B, size) );
checkCudaErrors( cudaMalloc((void**)&d_C, size) );
cudaEvent_t start, stop;
float elapsedTime = 0;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
// Copy vectors from host memory to device memory
checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
// checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) );
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
checkCudaErrors(cudaEventRecord(start));
PowerKernal<<<dimGrid,dimBlock>>>(d_A, d_C, iterations);
checkCudaErrors(cudaEventRecord(stop));
checkCudaErrors(cudaEventSynchronize(stop));
checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop));
printf("gpu execution time = %.2f s\n", elapsedTime/1000);
getLastCudaError("kernel launch failure");
cudaThreadSynchronize();
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
checkCudaErrors(cudaEventDestroy(start));
checkCudaErrors(cudaEventDestroy(stop));
CleanupResources();
return 0;
}
void CleanupResources(void){
// Free device memory
if (d_A)
cudaFree(d_A);
//if (d_B)
// cudaFree(d_B);
if (d_C)
cudaFree(d_C);
// Free host memory
if (h_A)
free(h_A);
// if (h_B)
// free(h_B);
if (h_C)
free(h_C);
}
// Allocates an array with random integer entries.
void RandomInit(int* data, int n){
for (int i = 0; i < n; ++i)
data[i] = rand() % 100; // fixed: integer division rand() / RAND_MAX almost always yields 0
}
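// Editorial sketch (hypothetical helper, not part of the original benchmark):
// a float initializer must divide in floating point before truncating,
// otherwise the integer quotient is almost always 0:
void RandomInitFloat(float* data, int n){
for (int i = 0; i < n; ++i)
data[i] = (float)rand() / (float)RAND_MAX; // uniform in [0, 1]
}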
|
c98768433ad0915385682742732f10aad1c3f70d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <opencv2/flann/random.h>
#include "compare.h"
#include "gputimer.h"
const int ARRAY_SIZE = 4096 * 100;
const int BLOCK_SIZE = 256;
// Reference
__global__ void smooth(float *v_new, const float *v) {
int myIdx = threadIdx.x * gridDim.x + blockIdx.x;
int numThreads = blockDim.x * gridDim.x;
int myLeftIdx = (myIdx == 0) ? 0 : myIdx - 1;
int myRightIdx = (myIdx == (numThreads - 1)) ? numThreads - 1 : myIdx + 1;
float myElt = v[myIdx];
float myLeftElt = v[myLeftIdx];
float myRightElt = v[myRightIdx];
v_new[myIdx] = 0.25f * myLeftElt + 0.5f * myElt + 0.25f * myRightElt;
}
// Your code
__global__ void smooth_shared(float *v_new, const float *v) {
extern __shared__ float s[];
// TODO: Fill in the rest of this function
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int local_tid = threadIdx.x;
// copy the data into shared mem and handle the block boundaries
s[1 + local_tid] = v[tid];
if (local_tid == 0) {
s[0] = tid > 0 ? v[tid - 1] : v[tid];
}
if (local_tid == blockDim.x - 1) {
// printf("%d %d %f\n", tid, local_tid, v[tid + 1]);
// ********** note: the index is 1 + local_tid + 1, where 1 + local_tid is the slot of the block's last regular element and one more +1 is the halo slot
s[1 + local_tid + 1] = tid < ARRAY_SIZE - 1 ? v[tid + 1] : v[tid];
}
__syncthreads();
// compute the smoothed value
local_tid += 1;
v_new[tid] = 0.25f * s[local_tid - 1] + 0.5f * s[local_tid] + 0.25f * s[local_tid + 1];
}
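// Editorial note: the dynamic shared allocation must hold blockDim.x elements
// plus the two halo slots, which is why smooth_wrapper below launches this
// kernel with (BLOCK_SIZE + 2) * sizeof(float) bytes of shared memory.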
void smooth_wrapper() {
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate the input array on the host
float h_in[ARRAY_SIZE];
float h_cmp[ARRAY_SIZE];
float h_out[ARRAY_SIZE];
float h_out_shared[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) {
// generate random float in [0, 1]
h_in[i] = (float) random() / (float) RAND_MAX;
}
for (int i = 0; i < ARRAY_SIZE; i++) {
h_cmp[i] = (0.25f * h_in[(i == 0) ? 0 : i - 1] +
0.50f * h_in[i] +
0.25f * h_in[(i == (ARRAY_SIZE - 1)) ? ARRAY_SIZE - 1 : i + 1]);
}
// declare GPU memory pointers
float *d_in, *d_out, *d_out_shared;
// allocate GPU memory
hipMalloc((void **) &d_in, ARRAY_BYTES);
hipMalloc((void **) &d_out, ARRAY_BYTES);
hipMalloc((void **) &d_out_shared, ARRAY_BYTES);
// transfer the input array to the GPU
hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);
// hipEvent_t start, stop;
// hipEventCreate(&start);
// hipEventCreate(&stop);
// launch the kernel
smooth << < ARRAY_SIZE / BLOCK_SIZE, BLOCK_SIZE >> > (d_out, d_in);
GpuTimer timer;
timer.Start();
smooth_shared << < ARRAY_SIZE / BLOCK_SIZE, BLOCK_SIZE, (BLOCK_SIZE + 2) * sizeof(float) >> > (d_out_shared, d_in);
// smooth << < ARRAY_SIZE / BLOCK_SIZE, BLOCK_SIZE, (BLOCK_SIZE + 2) * sizeof(float) >> > (d_out_shared, d_in);
timer.Stop();
printf("Your code executed in %g ms\n", timer.Elapsed());
// hipEventSynchronize(stop);
// float elapsedTime;
// hipEventElapsedTime(&elapsedTime, start, stop);
// copy back the result from GPU
hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost);
hipMemcpy(h_out_shared, d_out_shared, ARRAY_BYTES, hipMemcpyDeviceToHost);
// testing for correctness
compare(h_in, h_out, h_out_shared, h_cmp, ARRAY_SIZE);
// free GPU memory allocation
hipFree(d_in);
hipFree(d_out);
hipFree(d_out_shared);
}
|
c98768433ad0915385682742732f10aad1c3f70d.cu
|
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <opencv2/flann/random.h>
#include "compare.h"
#include "gputimer.h"
const int ARRAY_SIZE = 4096 * 100;
const int BLOCK_SIZE = 256;
// Reference
__global__ void smooth(float *v_new, const float *v) {
int myIdx = threadIdx.x * gridDim.x + blockIdx.x;
int numThreads = blockDim.x * gridDim.x;
int myLeftIdx = (myIdx == 0) ? 0 : myIdx - 1;
int myRightIdx = (myIdx == (numThreads - 1)) ? numThreads - 1 : myIdx + 1;
float myElt = v[myIdx];
float myLeftElt = v[myLeftIdx];
float myRightElt = v[myRightIdx];
v_new[myIdx] = 0.25f * myLeftElt + 0.5f * myElt + 0.25f * myRightElt;
}
// Your code
__global__ void smooth_shared(float *v_new, const float *v) {
extern __shared__ float s[];
// TODO: Fill in the rest of this function
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int local_tid = threadIdx.x;
// copy the data into shared mem and handle the block boundaries
s[1 + local_tid] = v[tid];
if (local_tid == 0) {
s[0] = tid > 0 ? v[tid - 1] : v[tid];
}
if (local_tid == blockDim.x - 1) {
// printf("%d %d %f\n", tid, local_tid, v[tid + 1]);
// ********** note: the index is 1 + local_tid + 1, where 1 + local_tid is the slot of the block's last regular element and one more +1 is the halo slot
s[1 + local_tid + 1] = tid < ARRAY_SIZE - 1 ? v[tid + 1] : v[tid];
}
__syncthreads();
// compute the smoothed value
local_tid += 1;
v_new[tid] = 0.25f * s[local_tid - 1] + 0.5f * s[local_tid] + 0.25f * s[local_tid + 1];
}
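// Editorial note: the dynamic shared allocation must hold blockDim.x elements
// plus the two halo slots, which is why smooth_wrapper below launches this
// kernel with (BLOCK_SIZE + 2) * sizeof(float) bytes of shared memory.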
void smooth_wrapper() {
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate the input array on the host
float h_in[ARRAY_SIZE];
float h_cmp[ARRAY_SIZE];
float h_out[ARRAY_SIZE];
float h_out_shared[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) {
// generate random float in [0, 1]
h_in[i] = (float) random() / (float) RAND_MAX;
}
for (int i = 0; i < ARRAY_SIZE; i++) {
h_cmp[i] = (0.25f * h_in[(i == 0) ? 0 : i - 1] +
0.50f * h_in[i] +
0.25f * h_in[(i == (ARRAY_SIZE - 1)) ? ARRAY_SIZE - 1 : i + 1]);
}
// declare GPU memory pointers
float *d_in, *d_out, *d_out_shared;
// allocate GPU memory
cudaMalloc((void **) &d_in, ARRAY_BYTES);
cudaMalloc((void **) &d_out, ARRAY_BYTES);
cudaMalloc((void **) &d_out_shared, ARRAY_BYTES);
// transfer the input array to the GPU
cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
// cudaEvent_t start, stop;
// cudaEventCreate(&start);
// cudaEventCreate(&stop);
// launch the kernel
smooth << < ARRAY_SIZE / BLOCK_SIZE, BLOCK_SIZE >> > (d_out, d_in);
GpuTimer timer;
timer.Start();
smooth_shared << < ARRAY_SIZE / BLOCK_SIZE, BLOCK_SIZE, (BLOCK_SIZE + 2) * sizeof(float) >> > (d_out_shared, d_in);
// smooth << < ARRAY_SIZE / BLOCK_SIZE, BLOCK_SIZE, (BLOCK_SIZE + 2) * sizeof(float) >> > (d_out_shared, d_in);
timer.Stop();
printf("Your code executed in %g ms\n", timer.Elapsed());
// cudaEventSynchronize(stop);
// float elapsedTime;
// cudaEventElapsedTime(&elapsedTime, start, stop);
// copy back the result from GPU
cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
cudaMemcpy(h_out_shared, d_out_shared, ARRAY_BYTES, cudaMemcpyDeviceToHost);
// testing for correctness
compare(h_in, h_out, h_out_shared, h_cmp, ARRAY_SIZE);
// free GPU memory allocation
cudaFree(d_in);
cudaFree(d_out);
cudaFree(d_out_shared);
}
|
1e6f50d1aa98662f67ef9f5b983809913eb3ba64.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <string>
#include <iostream>
#include <fstream>
#include <hip/hip_runtime.h>
#include <utils/init_cuda.h>
#include <communication/comm.h>
#include <communication/linear_partitioner.h>
#include <communication/parted_map.h>
#include <numerical_algos/vectors/block_vector.h>
#include <numerical_algos/matrices/brs_matrix.h>
#include <numerical_algos/matrices/brs_matrix_impl.h>
//TODO for_each default block size
typedef float real;
static const t_tensor_field_storage storage = TFS_DEVICE;
typedef communication::linear_partitioner partitioner_t;
typedef communication::parted_map<partitioner_t> map_t;
typedef numerical_algos::block_vector<real,storage,map_t> vector_t;
typedef numerical_algos::brs_matrix<real,storage,map_t> matrix_t;
typedef matrix_t::structure_type matrix_structure_t;
//TODO move this to some common header and use it in brs_matrix and brs_matrix_structure
int read_matrix_size(const std::string &fn)
{
std::ifstream f(fn.c_str(), std::ifstream::in);
if (!f) throw std::runtime_error("read_matrix_size: error while opening file " + fn);
std::string buf;
int algebraic_rows_n, algebraic_cols_n,
algebraic_nonzeros_n;
int block_row_size_, block_col_size_;
int glob_rows_n_, glob_cols_n_;
int glob_nonzeros_n_;
if (!getline(f,buf)) throw std::runtime_error("read_matrix_size: error while reading first line");
if (!(f >> buf >> block_row_size_ >> block_col_size_)) throw std::runtime_error("read_matrix_size: error while reading block sizes");
if (block_row_size_ != block_col_size_) throw std::runtime_error("read_matrix_size: block is not square");
if (!(f >> algebraic_rows_n >> algebraic_cols_n >> algebraic_nonzeros_n)) throw std::runtime_error("read_matrix_size: error while reading sizes");
if (algebraic_rows_n != algebraic_cols_n) throw std::runtime_error("read_matrix_size: matrix is not square");
if (algebraic_rows_n%block_row_size_ != 0) throw std::runtime_error("read_matrix_size: matrix size is not a multiple of block size");
if (algebraic_nonzeros_n%(block_row_size_*block_col_size_) != 0) throw std::runtime_error("read_matrix_size: matrix nonzero count is not a multiple of block size squared");
glob_rows_n_ = algebraic_rows_n/block_row_size_;
glob_cols_n_ = algebraic_cols_n/block_col_size_;
glob_nonzeros_n_ = algebraic_nonzeros_n/(block_row_size_*block_col_size_);
return glob_rows_n_;
}
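// Editorial note (format inferred from the reads above; the concrete layout
// is an assumption): the matrix file is expected to begin with
//   <banner line, skipped>
//   <tag> <block_row_size> <block_col_size>
//   <algebraic_rows> <algebraic_cols> <algebraic_nonzeros>
// and the function returns the number of block rows.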
int main(int argc, char **args)
{
if (MPI_Init(&argc, &args) != MPI_SUCCESS) {
std::cout << "ERROR: MPI_Init call failed ; abort" << std::endl;
return 1;
}
int comm_rank = communication::get_comm_rank(),
comm_size = communication::get_comm_size();
if (argc < 6) {
if (comm_rank == 0)
std::cout << "USAGE: " << std::string(args[0]) << " <matrix_fn> <vector_fn> <result_fn> <apply_type> <color_perm_fn>" << std::endl;
if (MPI_Finalize() != MPI_SUCCESS) {
std::cout << "WARNING: MPI_Finalize call failed" << std::endl;
return 2;
}
return 0;
}
std::string mat_fn(args[1]), vec_fn(args[2]), res_fn(args[3]);
int apply_type = atoi(args[4]);
std::string color_perm_fn(args[5]);
utils::init_cuda(1+comm_rank);
hipsparseHandle_t handle = 0;
CUSPARSE_SAFE_CALL( hipsparseCreate(&handle) );
int glob_size = read_matrix_size(mat_fn);
partitioner_t partitioner(glob_size, comm_size, comm_rank);
map_t map(partitioner, true);
vector_t res, vec;
matrix_structure_t mat_str;
matrix_t mat;
mat_str.pre_init_from_file(&map, mat_fn);
map.complete();
mat_str.init();
mat_str.print_stat();
mat.init(handle, &mat_str);
mat.read_from_file(mat_fn);
vec.init_from_file(map, vec_fn);
res.init(map, vec.block_size());
//vec.size()
if (apply_type == 1)
mat.apply(vec, res);
else if (apply_type == 2)
mat.apply_inverted_lower(vec, res);
else if (apply_type == 3)
mat.apply_inverted_upper(vec, res);
else
throw std::runtime_error("wrong apply_type argument");
if (color_perm_fn != "none") mat_str.write_colored_perm(color_perm_fn);
res.write_to_file(map, res_fn);
if (MPI_Finalize() != MPI_SUCCESS) {
std::cout << "WARNING: MPI_Finalize call failed" << std::endl;
return 3;
}
return 0;
}
|
1e6f50d1aa98662f67ef9f5b983809913eb3ba64.cu
|
#include <string>
#include <iostream>
#include <fstream>
#include <cuda_runtime.h>
#include <utils/init_cuda.h>
#include <communication/comm.h>
#include <communication/linear_partitioner.h>
#include <communication/parted_map.h>
#include <numerical_algos/vectors/block_vector.h>
#include <numerical_algos/matrices/brs_matrix.h>
#include <numerical_algos/matrices/brs_matrix_impl.h>
//TODO for_each default block size
typedef float real;
static const t_tensor_field_storage storage = TFS_DEVICE;
typedef communication::linear_partitioner partitioner_t;
typedef communication::parted_map<partitioner_t> map_t;
typedef numerical_algos::block_vector<real,storage,map_t> vector_t;
typedef numerical_algos::brs_matrix<real,storage,map_t> matrix_t;
typedef matrix_t::structure_type matrix_structure_t;
//TODO move this to some common header and use it in brs_matrix and brs_matrix_structure
int read_matrix_size(const std::string &fn)
{
std::ifstream f(fn.c_str(), std::ifstream::in);
if (!f) throw std::runtime_error("read_matrix_size: error while opening file " + fn);
std::string buf;
int algebraic_rows_n, algebraic_cols_n,
algebraic_nonzeros_n;
int block_row_size_, block_col_size_;
int glob_rows_n_, glob_cols_n_;
int glob_nonzeros_n_;
if (!getline(f,buf)) throw std::runtime_error("read_matrix_size: error while reading first line");
if (!(f >> buf >> block_row_size_ >> block_col_size_)) throw std::runtime_error("read_matrix_size: error while reading block sizes");
if (block_row_size_ != block_col_size_) throw std::runtime_error("read_matrix_size: block is not square");
if (!(f >> algebraic_rows_n >> algebraic_cols_n >> algebraic_nonzeros_n)) throw std::runtime_error("read_matrix_size: error while reading sizes");
if (algebraic_rows_n != algebraic_cols_n) throw std::runtime_error("read_matrix_size: matrix is not square");
if (algebraic_rows_n%block_row_size_ != 0) throw std::runtime_error("read_matrix_size: matrix size is not a multiple of block size");
if (algebraic_nonzeros_n%(block_row_size_*block_col_size_) != 0) throw std::runtime_error("read_matrix_size: matrix nonzero count is not a multiple of block size squared");
glob_rows_n_ = algebraic_rows_n/block_row_size_;
glob_cols_n_ = algebraic_cols_n/block_col_size_;
glob_nonzeros_n_ = algebraic_nonzeros_n/(block_row_size_*block_col_size_);
return glob_rows_n_;
}
int main(int argc, char **args)
{
if (MPI_Init(&argc, &args) != MPI_SUCCESS) {
std::cout << "ERROR: MPI_Init call failed ; abort" << std::endl;
return 1;
}
int comm_rank = communication::get_comm_rank(),
comm_size = communication::get_comm_size();
if (argc < 6) {
if (comm_rank == 0)
std::cout << "USAGE: " << std::string(args[0]) << " <matrix_fn> <vector_fn> <result_fn> <apply_type> <color_perm_fn>" << std::endl;
if (MPI_Finalize() != MPI_SUCCESS) {
std::cout << "WARNING: MPI_Finalize call failed" << std::endl;
return 2;
}
return 0;
}
std::string mat_fn(args[1]), vec_fn(args[2]), res_fn(args[3]);
int apply_type = atoi(args[4]);
std::string color_perm_fn(args[5]);
utils::init_cuda(1+comm_rank);
cusparseHandle_t handle = 0;
CUSPARSE_SAFE_CALL( cusparseCreate(&handle) );
int glob_size = read_matrix_size(mat_fn);
partitioner_t partitioner(glob_size, comm_size, comm_rank);
map_t map(partitioner, true);
vector_t res, vec;
matrix_structure_t mat_str;
matrix_t mat;
mat_str.pre_init_from_file(&map, mat_fn);
map.complete();
mat_str.init();
mat_str.print_stat();
mat.init(handle, &mat_str);
mat.read_from_file(mat_fn);
vec.init_from_file(map, vec_fn);
res.init(map, vec.block_size());
//vec.size()
if (apply_type == 1)
mat.apply(vec, res);
else if (apply_type == 2)
mat.apply_inverted_lower(vec, res);
else if (apply_type == 3)
mat.apply_inverted_upper(vec, res);
else
throw std::runtime_error("wrong apply_type argument");
if (color_perm_fn != "none") mat_str.write_colored_perm(color_perm_fn);
res.write_to_file(map, res_fn);
if (MPI_Finalize() != MPI_SUCCESS) {
std::cout << "WARNING: MPI_Finalize call failed" << std::endl;
return 3;
}
return 0;
}
|
b45ffefd55c18d83bcde0f79ff8972ee3a9f5f7b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Perform the multiplication of DATA_SIZE floating-point values on the CPU and on the GPU - 1 */
/* - rev.201905 by Yoshiki NAGATANI */
#include <stdio.h>
#include <stdlib.h>
/* DATA_SIZE = BLOCK_SIZE * GRID_SIZE must divide evenly (not checked by the program) */
#define DATA_SIZE 1048576
#define BLOCK_SIZE 256
#define GRID_SIZE (DATA_SIZE/BLOCK_SIZE)
/* the same computation is repeated REPEAT times for speed comparison */
#define REPEAT 10000
/*-----------------------------------------------------------*/
/* function that performs the multiplication on the CPU (single core) */
void MultiplyOnCPU(float* h_data_A, float* h_data_B, float* h_data_R) {
long i;
/* on the CPU, a for loop runs over all the data */
for (i = 0; i < DATA_SIZE; i++) {
h_data_R[i] = h_data_A[i] * h_data_B[i];
}
}
/*-----------------------------------------------------------*/
/* function that performs the multiplication on the GPU */
__global__ void MultiplyOnGPU(float* d_data_A, float* d_data_B, float* d_data_R) {
int id = blockDim.x * blockIdx.x + threadIdx.x;
/* on the GPU no for loop is needed: each thread only computes its own element (id) */
d_data_R[id] = d_data_A[id] * d_data_B[id];
}
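/* Editorial note: DATA_SIZE is assumed to be an exact multiple of BLOCK_SIZE
   (see the comment at the top), so no range check is needed; for arbitrary
   sizes the body would become:
     if (id < DATA_SIZE) d_data_R[id] = d_data_A[id] * d_data_B[id];      */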
/*-----------------------------------------------------------*/
int main(void) {
int i;
printf("DATA_SIZE(%d) = BLOCK_SIZE(%d) x GRID_SIZE(%d).\n", DATA_SIZE, BLOCK_SIZE, GRID_SIZE);
float* h_data_A; /* Host(CPU)-side memory */
float* h_data_B; /* Host(CPU)-side memory */
float* h_data_R; /* Host(CPU)-side memory */
float* h_data_R_fromGPU; /* Host(CPU)-side memory (for result checking only) */
float* d_data_A; /* Device(GPU)-side memory */
float* d_data_B; /* Device(GPU)-side memory */
float* d_data_R; /* Device(GPU)-side memory */
/* allocate host (CPU) memory (note: no error checking, for readability) */
h_data_A = (float*)malloc(DATA_SIZE * sizeof(float));
h_data_B = (float*)malloc(DATA_SIZE * sizeof(float));
h_data_R = (float*)malloc(DATA_SIZE * sizeof(float));
h_data_R_fromGPU = (float*)malloc(DATA_SIZE * sizeof(float));
/* allocate device (GPU) memory (note: no error checking, for readability) */
hipMalloc((void**)& d_data_A, DATA_SIZE * sizeof(float));
hipMalloc((void**)& d_data_B, DATA_SIZE * sizeof(float));
hipMalloc((void**)& d_data_R, DATA_SIZE * sizeof(float));
/* generate the data (on the CPU side in this example) */
for (i = 0; i < DATA_SIZE; i++) {
h_data_A[i] = (double)(rand()) / 32768.0;
h_data_B[i] = (double)(rand()) / 32768.0;
h_data_R[i] = 0.0;
}
/* copy memory contents to the device (CPU -> GPU) */
hipMemcpy(d_data_A, h_data_A, DATA_SIZE * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_data_B, h_data_B, DATA_SIZE * sizeof(float), hipMemcpyHostToDevice);
/* run the multiplication on the host (CPU), repeated REPEAT times for timing */
printf("Start calculation on CPU for %d times...", REPEAT);
for (i = 0; i < REPEAT; i++) {
MultiplyOnCPU(h_data_A, h_data_B, h_data_R);
}
printf("done!!\n");
/* run the multiplication on the device (GPU), repeated REPEAT times for timing */
printf("Start calculation on GPU for %d times...", REPEAT);
for (i = 0; i < REPEAT; i++) {
MultiplyOnGPU << <GRID_SIZE, BLOCK_SIZE >> > (d_data_A, d_data_B, d_data_R);
}
printf("done!!\n");
/* copy memory contents back from the device (CPU <- GPU) */
hipMemcpy(h_data_R_fromGPU, d_data_R, DATA_SIZE * sizeof(float), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
/* compare the results (on the CPU; only the first and last elements are shown) */
printf("Comparison of the Results:\n");
printf(" %8d: CPU:%f vs GPU:%f\n", 0, h_data_R[0], h_data_R_fromGPU[0]);
printf(" %8d: CPU:%f vs GPU:%f\n", DATA_SIZE - 1, h_data_R[DATA_SIZE - 1], h_data_R_fromGPU[DATA_SIZE - 1]);
hipDeviceReset();
return 0;
}
|
b45ffefd55c18d83bcde0f79ff8972ee3a9f5f7b.cu
|
/* Perform the multiplication of DATA_SIZE floating-point values on the CPU and on the GPU - 1 */
/* - rev.201905 by Yoshiki NAGATANI */
#include <stdio.h>
#include <stdlib.h>
/* DATA_SIZE = BLOCK_SIZE * GRID_SIZE must divide evenly (not checked by the program) */
#define DATA_SIZE 1048576
#define BLOCK_SIZE 256
#define GRID_SIZE (DATA_SIZE/BLOCK_SIZE)
/* the same computation is repeated REPEAT times for speed comparison */
#define REPEAT 10000
/*-----------------------------------------------------------*/
/* function that performs the multiplication on the CPU (single core) */
void MultiplyOnCPU(float* h_data_A, float* h_data_B, float* h_data_R) {
long i;
/* on the CPU, a for loop runs over all the data */
for (i = 0; i < DATA_SIZE; i++) {
h_data_R[i] = h_data_A[i] * h_data_B[i];
}
}
/*-----------------------------------------------------------*/
/* function that performs the multiplication on the GPU */
__global__ void MultiplyOnGPU(float* d_data_A, float* d_data_B, float* d_data_R) {
int id = blockDim.x * blockIdx.x + threadIdx.x;
/* on the GPU no for loop is needed: each thread only computes its own element (id) */
d_data_R[id] = d_data_A[id] * d_data_B[id];
}
/*-----------------------------------------------------------*/
int main(void) {
int i;
printf("DATA_SIZE(%d) = BLOCK_SIZE(%d) x GRID_SIZE(%d).\n", DATA_SIZE, BLOCK_SIZE, GRID_SIZE);
float* h_data_A; /* Host(CPU)-side memory */
float* h_data_B; /* Host(CPU)-side memory */
float* h_data_R; /* Host(CPU)-side memory */
float* h_data_R_fromGPU; /* Host(CPU)-side memory (for result checking only) */
float* d_data_A; /* Device(GPU)-side memory */
float* d_data_B; /* Device(GPU)-side memory */
float* d_data_R; /* Device(GPU)-side memory */
/* allocate host (CPU) memory (note: no error checking, for readability) */
h_data_A = (float*)malloc(DATA_SIZE * sizeof(float));
h_data_B = (float*)malloc(DATA_SIZE * sizeof(float));
h_data_R = (float*)malloc(DATA_SIZE * sizeof(float));
h_data_R_fromGPU = (float*)malloc(DATA_SIZE * sizeof(float));
/* allocate device (GPU) memory (note: no error checking, for readability) */
cudaMalloc((void**)& d_data_A, DATA_SIZE * sizeof(float));
cudaMalloc((void**)& d_data_B, DATA_SIZE * sizeof(float));
cudaMalloc((void**)& d_data_R, DATA_SIZE * sizeof(float));
/* generate the data (on the CPU side in this example) */
for (i = 0; i < DATA_SIZE; i++) {
h_data_A[i] = (double)(rand()) / 32768.0;
h_data_B[i] = (double)(rand()) / 32768.0;
h_data_R[i] = 0.0;
}
/* copy memory contents to the device (CPU -> GPU) */
cudaMemcpy(d_data_A, h_data_A, DATA_SIZE * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_data_B, h_data_B, DATA_SIZE * sizeof(float), cudaMemcpyHostToDevice);
/* run the multiplication on the host (CPU), repeated REPEAT times for timing */
printf("Start calculation on CPU for %d times...", REPEAT);
for (i = 0; i < REPEAT; i++) {
MultiplyOnCPU(h_data_A, h_data_B, h_data_R);
}
printf("done!!\n");
/* run the multiplication on the device (GPU), repeated REPEAT times for timing */
printf("Start calculation on GPU for %d times...", REPEAT);
for (i = 0; i < REPEAT; i++) {
MultiplyOnGPU << <GRID_SIZE, BLOCK_SIZE >> > (d_data_A, d_data_B, d_data_R);
}
printf("done!!\n");
/* copy memory contents back from the device (CPU <- GPU) */
cudaMemcpy(h_data_R_fromGPU, d_data_R, DATA_SIZE * sizeof(float), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
/* compare the results (on the CPU; only the first and last elements are shown) */
printf("Comparison of the Results:\n");
printf(" %8d: CPU:%f vs GPU:%f\n", 0, h_data_R[0], h_data_R_fromGPU[0]);
printf(" %8d: CPU:%f vs GPU:%f\n", DATA_SIZE - 1, h_data_R[DATA_SIZE - 1], h_data_R_fromGPU[DATA_SIZE - 1]);
cudaDeviceReset();
return 0;
}
|
d50752615b73817926890dcf676fb48396d952d3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
__global__ void add( int*a, int*b, int*c ) {
c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x];
}
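// Editorial note: this kernel indexes by blockIdx.x alone, so it is meant to
// be launched as N blocks of 1 thread each, which is exactly how main() below
// launches it.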
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#define N 256
int main( void ) {
int *a, *b, *c; // host copies of a,b,c
int *dev_a, *dev_b, *dev_c; // device copies of a, b, c
int size = N *sizeof( int); // we need space for N integers
// allocate device copies of a, b, c
hipMalloc( (void**)&dev_a, size );
hipMalloc( (void**)&dev_b, size );
hipMalloc( (void**)&dev_c, size );
a = (int*)malloc( size );
b = (int*)malloc( size );
c = (int*)malloc( size );
/* initialize random seed: */
srand ( time(NULL) );
for (int i=0; i<N; i+=N/8 )
{ a[i] = rand() %100 + 1;
printf("a %i ",a[i]);
}
printf(" end of a \n\n");
for (int i=0; i<N; i+=N/8 )
{ b[i] = rand() %100 + 1;
printf("b %i ",b[i]);
}
printf(" end of b \n\n");
// copy inputs to device
hipMemcpy( dev_a, a, size, hipMemcpyHostToDevice);
hipMemcpy( dev_b, b, size, hipMemcpyHostToDevice);
// launch add() kernel with N parallel blocks
hipLaunchKernelGGL(( add), dim3(N), dim3(1) , 0, 0, dev_a, dev_b, dev_c);
// copy device result back to host copy of c
hipMemcpy( c, dev_c, size, hipMemcpyDeviceToHost);
for(int i=0;i<N;printf("c %i ",c[i]), i+=N/8 );
free( a ); free( b ); free( c );
hipFree( dev_a);
hipFree( dev_b);
hipFree( dev_c);
return 0;
}
|
d50752615b73817926890dcf676fb48396d952d3.cu
|
#include <iostream>
__global__ void add( int*a, int*b, int*c ) {
c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x];
}
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#define N 256
int main( void ) {
int *a, *b, *c; // host copies of a,b,c
int *dev_a, *dev_b, *dev_c; // device copies of a, b, c
int size = N *sizeof( int); // we need space for N integers
// allocate device copies of a, b, c
cudaMalloc( (void**)&dev_a, size );
cudaMalloc( (void**)&dev_b, size );
cudaMalloc( (void**)&dev_c, size );
a = (int*)malloc( size );
b = (int*)malloc( size );
c = (int*)malloc( size );
/* initialize random seed: */
srand ( time(NULL) );
for (int i=0; i<N; i+=N/8 )
{ a[i] = rand() %100 + 1;
printf("a %i ",a[i]);
}
printf(" end of a \n\n");
for (int i=0; i<N; i+=N/8 )
{ b[i] = rand() %100 + 1;
printf("b %i ",b[i]);
}
printf(" end of b \n\n");
// copy inputs to device
cudaMemcpy( dev_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy( dev_b, b, size, cudaMemcpyHostToDevice);
// launch add() kernel with N parallel blocks
add<<< N, 1 >>>( dev_a, dev_b, dev_c);
// copy device result back to host copy of c
cudaMemcpy( c, dev_c, size, cudaMemcpyDeviceToHost);
for(int i=0;i<N;printf("c %i ",c[i]), i+=N/8 );
free( a ); free( b ); free( c );
cudaFree( dev_a);
cudaFree( dev_b);
cudaFree( dev_c);
return 0;
}
|
3080f44f2420afd8895685592425baf795bf36cc.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from magmablas/zgemm_reduce.cu, normal z -> d, Wed Jan 2 14:18:50 2019
*/
#include "magma_internal.h"
#include "magma_templates.h"
// size of work for a thread block
#define BLK_M 16
#define BLK_N 16
// BLK_K gets defined in magmablas_dgemm_reduce,
// because it depends on the CUDA architecture at runtime.
/******************************************************************************/
// BLK_K size is templated, as it depends on CUDA architecture at runtime.
// Hmm... how to compile for both CUDA arch 1.x and 2.x?
template< int BLK_K >
__global__
void dgemm_reduce_kernel(
int m, int n, int k,
double alpha,
const double* __restrict__ dA, int lda,
const double* __restrict__ dB, int ldb,
double beta,
double * __restrict__ dC, int ldc)
{
#if (__CUDA_ARCH__ >= 200)
const int tx = threadIdx.x;
if (blockIdx.x*BLK_M + threadIdx.y < m && blockIdx.y*BLK_N + threadIdx.z < n) {
dA += (blockIdx.x*BLK_M + threadIdx.y) * lda;
dB += (blockIdx.y*BLK_N + threadIdx.z) * ldb;
dC += blockIdx.x*BLK_M + blockIdx.y*BLK_N * ldc;
// was: sum[BLK_M][BLK_N+1][BLK_K+1];
// moved 3rd dimension to 1st dimension to make magma_sum_reduce_3d interface nicer.
__shared__ double sum[BLK_K][BLK_M+1][BLK_N+1];
double lsum;
/* w := v**H * C */
lsum = MAGMA_D_ZERO;
for( int j = tx; j < k; j += BLK_K )
lsum += MAGMA_D_CONJ( dA[j] )* dB[j];
sum[tx][threadIdx.y][threadIdx.z] = lsum;
magma_sum_reduce_3d< BLK_K, BLK_M+1, BLK_N+1 >( tx, threadIdx.y, threadIdx.z, sum );
/* C := C - v * w */
__syncthreads();
if (threadIdx.x == 0) {
if (MAGMA_D_EQUAL(beta, MAGMA_D_ZERO))
dC[threadIdx.y + threadIdx.z*ldc] = alpha*sum[0][threadIdx.y][threadIdx.z];
else
dC[threadIdx.y + threadIdx.z*ldc] = beta* dC[threadIdx.y + threadIdx.z*ldc] +
alpha*sum[0][threadIdx.y][threadIdx.z];
}
}
#endif
}
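// Editorial note: each thread block is BLK_K x BLK_M x BLK_N threads, so one
// block computes a BLK_M x BLK_N tile of C, with BLK_K threads cooperating on
// the k-dimension dot product of each tile element via magma_sum_reduce_3d.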
/***************************************************************************//**
Purpose
-------
DGEMM_REDUCE performs one of the matrix-matrix operations
C := alpha*A^T*B + beta*C,
where alpha and beta are scalars, and A, B and C are matrices, with A
a k-by-m matrix, B a k-by-n matrix, and C an m-by-n matrix.
This routine is tuned for m, n << k. Typically, m and n are expected
to be less than 128.
@ingroup magma_gemm
*******************************************************************************/
extern "C" void
magmablas_dgemm_reduce(
magma_int_t m, magma_int_t n, magma_int_t k,
double alpha,
magmaDouble_const_ptr dA, magma_int_t ldda,
magmaDouble_const_ptr dB, magma_int_t lddb,
double beta,
magmaDouble_ptr dC, magma_int_t lddc,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( k < 0 )
info = -3;
else if ( ldda < m )
info = -6;
else if ( lddb < k )
info = -8;
else if ( lddc < m )
info = -11;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// call CUDA ARCH 1.x -- maximum 512 threads
const int NUM_THREADS = 512;
const int BLK_K = (NUM_THREADS / (BLK_M * BLK_N)); // == 2
dim3 threads( BLK_K, BLK_M, BLK_N );
dim3 blocks( magma_ceildiv( m, BLK_M ), magma_ceildiv( n, BLK_N ), 1 );
hipLaunchKernelGGL(( dgemm_reduce_kernel<BLK_K>) , dim3(blocks), dim3(threads), 0, queue->cuda_stream() ,
m, n, k, alpha, dA, ldda, dB, lddb, beta, dC, lddc );
}
else {
// --------------------
// call CUDA ARCH 2.x -- maximum 1024 threads
const int NUM_THREADS = 1024;
const int BLK_K = (NUM_THREADS / (BLK_M * BLK_N)); // == 4
dim3 threads( BLK_K, BLK_M, BLK_N );
dim3 blocks( magma_ceildiv( m, BLK_M ), magma_ceildiv( n, BLK_N ), 1 );
hipLaunchKernelGGL(( dgemm_reduce_kernel<BLK_K>) , dim3(blocks), dim3(threads), 0, queue->cuda_stream() ,
m, n, k, alpha, dA, ldda, dB, lddb, beta, dC, lddc );
}
}
|
3080f44f2420afd8895685592425baf795bf36cc.cu
|
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from magmablas/zgemm_reduce.cu, normal z -> d, Wed Jan 2 14:18:50 2019
*/
#include "magma_internal.h"
#include "magma_templates.h"
// size of work for a thread block
#define BLK_M 16
#define BLK_N 16
// BLK_K gets defined in magmablas_dgemm_reduce,
// because it depends on the CUDA architecture at runtime.
/******************************************************************************/
// BLK_K size is templated, as it depends on CUDA architecture at runtime.
// Hmm... how to compile for both CUDA arch 1.x and 2.x?
template< int BLK_K >
__global__
void dgemm_reduce_kernel(
int m, int n, int k,
double alpha,
const double* __restrict__ dA, int lda,
const double* __restrict__ dB, int ldb,
double beta,
double * __restrict__ dC, int ldc)
{
#if (__CUDA_ARCH__ >= 200)
const int tx = threadIdx.x;
if (blockIdx.x*BLK_M + threadIdx.y < m && blockIdx.y*BLK_N + threadIdx.z < n) {
dA += (blockIdx.x*BLK_M + threadIdx.y) * lda;
dB += (blockIdx.y*BLK_N + threadIdx.z) * ldb;
dC += blockIdx.x*BLK_M + blockIdx.y*BLK_N * ldc;
// was: sum[BLK_M][BLK_N+1][BLK_K+1];
// moved 3rd dimension to 1st dimension to make magma_sum_reduce_3d interface nicer.
__shared__ double sum[BLK_K][BLK_M+1][BLK_N+1];
double lsum;
/* w := v**H * C */
lsum = MAGMA_D_ZERO;
for( int j = tx; j < k; j += BLK_K )
lsum += MAGMA_D_CONJ( dA[j] )* dB[j];
sum[tx][threadIdx.y][threadIdx.z] = lsum;
magma_sum_reduce_3d< BLK_K, BLK_M+1, BLK_N+1 >( tx, threadIdx.y, threadIdx.z, sum );
/* C := C - v * w */
__syncthreads();
if (threadIdx.x == 0) {
if (MAGMA_D_EQUAL(beta, MAGMA_D_ZERO))
dC[threadIdx.y + threadIdx.z*ldc] = alpha*sum[0][threadIdx.y][threadIdx.z];
else
dC[threadIdx.y + threadIdx.z*ldc] = beta* dC[threadIdx.y + threadIdx.z*ldc] +
alpha*sum[0][threadIdx.y][threadIdx.z];
}
}
#endif
}
/***************************************************************************//**
Purpose
-------
DGEMM_REDUCE performs one of the matrix-matrix operations
C := alpha*A^T*B + beta*C,
where alpha and beta are scalars, and A, B and C are matrices, with A
a k-by-m matrix, B a k-by-n matrix, and C an m-by-n matrix.
This routine is tuned for m, n << k. Typically, m and n are expected
to be less than 128.
@ingroup magma_gemm
*******************************************************************************/
extern "C" void
magmablas_dgemm_reduce(
magma_int_t m, magma_int_t n, magma_int_t k,
double alpha,
magmaDouble_const_ptr dA, magma_int_t ldda,
magmaDouble_const_ptr dB, magma_int_t lddb,
double beta,
magmaDouble_ptr dC, magma_int_t lddc,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( k < 0 )
info = -3;
else if ( ldda < m )
info = -6;
else if ( lddb < k )
info = -8;
else if ( lddc < m )
info = -11;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// call CUDA ARCH 1.x -- maximum 512 threads
const int NUM_THREADS = 512;
const int BLK_K = (NUM_THREADS / (BLK_M * BLK_N)); // == 2
dim3 threads( BLK_K, BLK_M, BLK_N );
dim3 blocks( magma_ceildiv( m, BLK_M ), magma_ceildiv( n, BLK_N ), 1 );
dgemm_reduce_kernel<BLK_K> <<< blocks, threads, 0, queue->cuda_stream() >>>
( m, n, k, alpha, dA, ldda, dB, lddb, beta, dC, lddc );
}
else {
// --------------------
// call CUDA ARCH 2.x -- maximum 1024 threads
const int NUM_THREADS = 1024;
const int BLK_K = (NUM_THREADS / (BLK_M * BLK_N)); // == 4
dim3 threads( BLK_K, BLK_M, BLK_N );
dim3 blocks( magma_ceildiv( m, BLK_M ), magma_ceildiv( n, BLK_N ), 1 );
dgemm_reduce_kernel<BLK_K> <<< blocks, threads, 0, queue->cuda_stream() >>>
( m, n, k, alpha, dA, ldda, dB, lddb, beta, dC, lddc );
}
}
|
ce9f0b15017ba0ae4752dc746ffb78de8dbf19a9.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/copying.hpp>
#include <cudf/stream_compaction.hpp>
#include <cudf/detail/stream_compaction.hpp>
#include <cudf/detail/search.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/dictionary/encode.hpp>
#include <cudf/dictionary/detail/encode.hpp>
#include <cudf/dictionary/dictionary_factories.hpp>
namespace cudf
{
namespace dictionary
{
namespace detail
{
/**
* @brief Create a new dictionary column from a column_view.
*
*/
std::unique_ptr<column> encode( column_view const& input_column,
data_type indices_type,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
CUDF_EXPECTS( indices_type.id()==INT32, "only INT32 type for indices");
// side effects of this function we are now dependent on:
// - resulting column elements are sorted ascending
// - nulls are sorted to the beginning
auto table_keys = experimental::detail::drop_duplicates( table_view{{input_column}},
std::vector<size_type>{0},
experimental::duplicate_keep_option::KEEP_FIRST,
true, mr, stream )->release(); // true == nulls are equal
std::unique_ptr<column> keys_column(std::move(table_keys.front()));
if( input_column.has_nulls() )
{
// the single null entry should be at the beginning -- side effect from drop_duplicates
// copy the column without the null entry
keys_column = std::make_unique<column>(experimental::slice(keys_column->view(),
std::vector<size_type>{1,keys_column->size()}).front(),stream,mr);
keys_column->set_null_mask( rmm::device_buffer{}, 0 ); // remove the null-mask
}
// this returns a column with no null entries
// - it appears to ignore the null entries in the input and tries to place the value regardless
auto indices_column = cudf::experimental::detail::lower_bound(table_view{{keys_column->view()}},
table_view{{input_column}},
std::vector<order>{order::ASCENDING},
std::vector<null_order>{null_order::AFTER},
mr, stream );
// we should probably copy/cast to INT32 type if different
CUDF_EXPECTS( indices_column->type() == indices_type, "expecting INT32 indices type" );
// create column with keys_column and indices_column
return make_dictionary_column( std::move(keys_column), std::move(indices_column),
copy_bitmask( input_column, stream, mr), input_column.null_count() );
}
} // namespace detail
// external API
std::unique_ptr<column> encode( column_view const& input_column,
data_type indices_type,
rmm::mr::device_memory_resource* mr )
{
return detail::encode( input_column, indices_type, mr );
}
} // namespace dictionary
} // namespace cudf
|
ce9f0b15017ba0ae4752dc746ffb78de8dbf19a9.cu
|
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/copying.hpp>
#include <cudf/stream_compaction.hpp>
#include <cudf/detail/stream_compaction.hpp>
#include <cudf/detail/search.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/dictionary/encode.hpp>
#include <cudf/dictionary/detail/encode.hpp>
#include <cudf/dictionary/dictionary_factories.hpp>
namespace cudf
{
namespace dictionary
{
namespace detail
{
/**
* @brief Create a new dictionary column from a column_view.
*
*/
std::unique_ptr<column> encode( column_view const& input_column,
data_type indices_type,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
CUDF_EXPECTS( indices_type.id()==INT32, "only INT32 type for indices");
// we now depend on side effects of this function:
// - resulting column elements are sorted ascending
// - nulls are sorted to the beginning
auto table_keys = experimental::detail::drop_duplicates( table_view{{input_column}},
std::vector<size_type>{0},
experimental::duplicate_keep_option::KEEP_FIRST,
true, mr, stream )->release(); // true == nulls are equal
std::unique_ptr<column> keys_column(std::move(table_keys.front()));
if( input_column.has_nulls() )
{
// the single null entry should be at the beginning -- side effect from drop_duplicates
// copy the column without the null entry
keys_column = std::make_unique<column>(experimental::slice(keys_column->view(),
std::vector<size_type>{1,keys_column->size()}).front(),stream,mr);
keys_column->set_null_mask( rmm::device_buffer{}, 0 ); // remove the null-mask
}
// this returns a column with no null entries
// - it appears to ignore the null entries in the input and tries to place the value regardless
auto indices_column = cudf::experimental::detail::lower_bound(table_view{{keys_column->view()}},
table_view{{input_column}},
std::vector<order>{order::ASCENDING},
std::vector<null_order>{null_order::AFTER},
mr, stream );
// we should probably copy/cast to INT32 type if different
CUDF_EXPECTS( indices_column->type() == indices_type, "expecting INT32 indices type" );
// create column with keys_column and indices_column
return make_dictionary_column( std::move(keys_column), std::move(indices_column),
copy_bitmask( input_column, stream, mr), input_column.null_count() );
}
} // namespace detail
// external API
std::unique_ptr<column> encode( column_view const& input_column,
data_type indices_type,
rmm::mr::device_memory_resource* mr )
{
return detail::encode( input_column, indices_type, mr );
}
} // namespace dictionary
} // namespace cudf
|
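The encode() implementation above reduces to two primitives: deduplicate the input to obtain sorted keys, then map every input element to its key's position with a lower_bound search. Here is a hedged standalone sketch of the same idea in plain Thrust (cudf's drop_duplicates and detail::lower_bound are replaced by thrust::sort/unique/lower_bound, and null handling is omitted):

#include <cstdio>
#include <vector>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/unique.h>
#include <thrust/binary_search.h>

int main() {
    std::vector<int> h_input = {5, 3, 5, 7, 3};
    thrust::device_vector<int> input(h_input.begin(), h_input.end());
    // keys: sorted, deduplicated copy of the input (drop_duplicates analogue)
    thrust::device_vector<int> keys = input;
    thrust::sort(keys.begin(), keys.end());
    keys.erase(thrust::unique(keys.begin(), keys.end()), keys.end());
    // indices: position of each input element within keys (lower_bound analogue)
    thrust::device_vector<int> indices(input.size());
    thrust::lower_bound(keys.begin(), keys.end(),
                        input.begin(), input.end(), indices.begin());
    // keys = {3, 5, 7}; indices = {1, 0, 1, 2, 0}
    for (size_t i = 0; i < indices.size(); ++i)
        printf("%d ", (int)indices[i]);
    printf("\n");
    return 0;
}

Together, keys and indices losslessly reproduce the input, which is exactly the dictionary-column invariant the cudf code relies on.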
19aeedd7026dc721f655e1d5a97eb644d84350e1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <opencv2/opencv.hpp>
#include <opencv2/core/cuda.hpp>
#include <iostream>
#include <fstream>
#include <chrono>
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/random.h>
using namespace cv;
using namespace std;
// Texture
texture<float4, hipTextureType2D, hipReadModeElementType> tex_imgYCrCb;
// Constant
const static int SAMPLE_NUM = 444;
__constant__ float2 const_Mcoor[SAMPLE_NUM];
__constant__ float4 const_marker[SAMPLE_NUM];
// Timer
class Timer {
typedef std::chrono::time_point<std::chrono::high_resolution_clock> Clock;
long long count;
bool running;
Clock prev_start_;
Clock Now() {
return std::chrono::high_resolution_clock::now();
}
public:
void Start() {
running = true;
prev_start_ = Now();
}
void Pause() {
if (running) {
running = false;
auto diff = Now() - prev_start_;
count += std::chrono::duration_cast<std::chrono::nanoseconds>(diff).count();
}
}
void Reset() {
running = false;
count = 0;
}
long long get_count() {
return count;
}
Timer() { Reset(); }
};
struct intwhprg {
int w, h;
__host__ __device__
intwhprg(int _w = 0, int _h = 100) {
w = _w;
h = _h;
}
__host__ __device__
int2 operator()(const int n) const {
thrust::default_random_engine rng;
thrust::uniform_int_distribution<int> distw(-1, w - 1);
thrust::uniform_int_distribution<int> disth(-1, h - 1);
rng.discard(n);
return make_int2(distw(rng), disth(rng));
}
};
__global__
void assign_kernel(float2 *mCoor, float4 *mValue, int2 *rand_coor, const cuda::PtrStepSz<float3> marker_d, const int2 mDim, const float2 markerDim) {
const int tIdx = threadIdx.x;
const int Idx = blockIdx.x * 128 + tIdx;
if (Idx >= SAMPLE_NUM)
return;
int x = rand_coor[Idx].x;
int y = rand_coor[Idx].y;
float3 v = marker_d(y, x);
mValue[Idx] = make_float4(v.x, v.y, v.z, 0);
float2 coor;
coor.x = (2 * float(x) - mDim.x) / mDim.x * markerDim.x;
coor.y = -(2 * float(y) - mDim.y) / mDim.y * markerDim.y;
mCoor[Idx] = coor;
}
void randSample(thrust::device_vector<float2>* mCoor, thrust::device_vector<float4>* mValue, const cuda::GpuMat &marker_d, const int2& mDim, const float2 markerDim) {
// rand pixel
thrust::device_vector<int2> rand_coor(SAMPLE_NUM, make_int2(0, 0));
thrust::counting_iterator<int> i0(58);
thrust::transform(i0, i0 + SAMPLE_NUM, rand_coor.begin(), intwhprg(mDim.x, mDim.y));
// get pixel value and position
const int BLOCK_NUM = (SAMPLE_NUM - 1) / 128 + 1;
assign_kernel << < BLOCK_NUM, 128 >> > (thrust::raw_pointer_cast(mCoor->data()), thrust::raw_pointer_cast(mValue->data()), thrust::raw_pointer_cast(rand_coor.data()), marker_d, mDim, markerDim);
// bind to const mem
hipMemcpyToSymbol(const_Mcoor, thrust::raw_pointer_cast(mCoor->data()), sizeof(float2)* SAMPLE_NUM, 0, hipMemcpyDeviceToDevice);
hipMemcpyToSymbol(const_marker, thrust::raw_pointer_cast(mValue->data()), sizeof(float4)* SAMPLE_NUM, 0, hipMemcpyDeviceToDevice);
}
int main() {
cout << hipGetErrorString(hipGetLastError()) << endl;
// read image
Mat marker = imread("marker.png");
marker.convertTo(marker, CV_32FC3, 1 / 255.0);
cvtColor(marker, marker, CV_BGR2YCrCb);
cuda::GpuMat marker_d(marker);
// allocate mem
thrust::device_vector<float2> mCoor(SAMPLE_NUM, make_float2(0, 0));
thrust::device_vector<float4> mValue(SAMPLE_NUM, make_float4(0, 0, 0, 0));
//float2* mCoor;
//float4* mValue;
//hipMalloc((void**)&mCoor, sizeof(float2) * SAMPLE_NUM);
//hipMalloc((void**)&mValue, sizeof(float4) * SAMPLE_NUM);
// initialize parameters
int2 mDim = make_int2(marker.cols, marker.rows);
float2 markerDim = make_float2(0.5 * marker.cols / marker.rows, 0.5);
// rand pixel
//thrust::device_vector<int2> rand_coor(SAMPLE_NUM, make_int2(0, 0));
//thrust::counting_iterator<int> i0(58);
//thrust::transform(i0, i0 + SAMPLE_NUM, rand_coor.begin(), intwhprg(mDim.x, mDim.y));
// rand sample
Timer timer;
timer.Reset(); timer.Start();
randSample(&mCoor, &mValue, marker_d, mDim, markerDim);
hipDeviceSynchronize();
timer.Pause();
cout << "GPU: " << timer.get_count() << " ns." << endl;
//ofstream outFile("outCuda.txt");
//if (!outFile)
// return 0;
//for (int i = 0; i < SAMPLE_NUM; i++) {
// float4 f4 = mValue[i];
// float2 f2 = mCoor[i];
// outFile << f4.x << " " << f4.y << " " << f4.z << " " << f4.w << " ";
// outFile << f2.x << " " << f2.y << endl;
//}
//outFile.close();
//hipFree((void*) mCoor);
//hipFree((void*) mValue);
// initialize constant memory
float2 *coor2 = new float2[SAMPLE_NUM];
float4 *value = new float4[SAMPLE_NUM];
timer.Reset(); timer.Start();
for (int i = 0; i < SAMPLE_NUM; i++) {
//int2 xy = rand_coor[i];
int x = rand() % mDim.x;
int y = rand() % mDim.y;
Vec3f YCrCb = marker.at<Vec3f>(y, x);
value[i] = make_float4(YCrCb[0], YCrCb[1], YCrCb[2], 0);
coor2[i].x = (2 * float(x) - mDim.x) / mDim.x * markerDim.x;
coor2[i].y = -(2 * float(y) - mDim.y) / mDim.y * markerDim.y;
}
hipMemcpyToSymbol(const_Mcoor, coor2, sizeof(float2)* SAMPLE_NUM);
hipMemcpyToSymbol(const_marker, value, sizeof(float4)* SAMPLE_NUM);
hipDeviceSynchronize();
timer.Pause();
cout << "CPU: " << timer.get_count() << " ns." << endl;
// for checking...
//ofstream outFile1("out.txt");
//if (!outFile1)
// return 0;
//for (int i = 0; i < SAMPLE_NUM; i++) {
// float4 f4 = value[i];
// float2 f2 = coor2[i];
// outFile1 << f4.x << " " << f4.y << " " << f4.z << " " << f4.w << " ";
// outFile1 << f2.x << " " << f2.y << endl;
//}
//outFile1.close();
delete[] coor2;
delete[] value;
hipDeviceSynchronize();
cout << hipGetErrorString(hipGetLastError()) << endl;
return 0;
}
|
19aeedd7026dc721f655e1d5a97eb644d84350e1.cu
|
#include <opencv2/opencv.hpp>
#include <opencv2/core/cuda.hpp>
#include <iostream>
#include <fstream>
#include <chrono>
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/random.h>
using namespace cv;
using namespace std;
// Texture
texture<float4, cudaTextureType2D, cudaReadModeElementType> tex_imgYCrCb;
// Constant
const static int SAMPLE_NUM = 444;
__constant__ float2 const_Mcoor[SAMPLE_NUM];
__constant__ float4 const_marker[SAMPLE_NUM];
// Timer
class Timer {
typedef std::chrono::time_point<std::chrono::high_resolution_clock> Clock;
long long count;
bool running;
Clock prev_start_;
Clock Now() {
return std::chrono::high_resolution_clock::now();
}
public:
void Start() {
running = true;
prev_start_ = Now();
}
void Pause() {
if (running) {
running = false;
auto diff = Now() - prev_start_;
count += std::chrono::duration_cast<std::chrono::nanoseconds>(diff).count();
}
}
void Reset() {
running = false;
count = 0;
}
long long get_count() {
return count;
}
Timer() { Reset(); }
};
struct intwhprg {
int w, h;
__host__ __device__
intwhprg(int _w = 0, int _h = 100) {
w = _w;
h = _h;
}
__host__ __device__
int2 operator()(const int n) const {
thrust::default_random_engine rng;
thrust::uniform_int_distribution<int> distw(-1, w - 1);
thrust::uniform_int_distribution<int> disth(-1, h - 1);
rng.discard(n);
return make_int2(distw(rng), disth(rng));
}
};
__global__
void assign_kernel(float2 *mCoor, float4 *mValue, int2 *rand_coor, const cuda::PtrStepSz<float3> marker_d, const int2 mDim, const float2 markerDim) {
const int tIdx = threadIdx.x;
const int Idx = blockIdx.x * 128 + tIdx;
if (Idx >= SAMPLE_NUM)
return;
int x = rand_coor[Idx].x;
int y = rand_coor[Idx].y;
float3 v = marker_d(y, x);
mValue[Idx] = make_float4(v.x, v.y, v.z, 0);
float2 coor;
coor.x = (2 * float(x) - mDim.x) / mDim.x * markerDim.x;
coor.y = -(2 * float(y) - mDim.y) / mDim.y * markerDim.y;
mCoor[Idx] = coor;
}
void randSample(thrust::device_vector<float2>* mCoor, thrust::device_vector<float4>* mValue, const cuda::GpuMat &marker_d, const int2& mDim, const float2 markerDim) {
// rand pixel
thrust::device_vector<int2> rand_coor(SAMPLE_NUM, make_int2(0, 0));
thrust::counting_iterator<int> i0(58);
thrust::transform(i0, i0 + SAMPLE_NUM, rand_coor.begin(), intwhprg(mDim.x, mDim.y));
// get pixel value and position
const int BLOCK_NUM = (SAMPLE_NUM - 1) / 128 + 1;
assign_kernel << < BLOCK_NUM, 128 >> > (thrust::raw_pointer_cast(mCoor->data()), thrust::raw_pointer_cast(mValue->data()), thrust::raw_pointer_cast(rand_coor.data()), marker_d, mDim, markerDim);
// bind to const mem
cudaMemcpyToSymbol(const_Mcoor, thrust::raw_pointer_cast(mCoor->data()), sizeof(float2)* SAMPLE_NUM, 0, cudaMemcpyDeviceToDevice);
cudaMemcpyToSymbol(const_marker, thrust::raw_pointer_cast(mValue->data()), sizeof(float4)* SAMPLE_NUM, 0, cudaMemcpyDeviceToDevice);
}
int main() {
cout << cudaGetErrorString(cudaGetLastError()) << endl;
// read image
Mat marker = imread("marker.png");
marker.convertTo(marker, CV_32FC3, 1 / 255.0);
cvtColor(marker, marker, CV_BGR2YCrCb);
cuda::GpuMat marker_d(marker);
// allocate mem
thrust::device_vector<float2> mCoor(SAMPLE_NUM, make_float2(0, 0));
thrust::device_vector<float4> mValue(SAMPLE_NUM, make_float4(0, 0, 0, 0));
//float2* mCoor;
//float4* mValue;
//cudaMalloc((void**)&mCoor, sizeof(float2) * SAMPLE_NUM);
//cudaMalloc((void**)&mValue, sizeof(float4) * SAMPLE_NUM);
// initialize parameters
int2 mDim = make_int2(marker.cols, marker.rows);
float2 markerDim = make_float2(0.5 * marker.cols / marker.rows, 0.5);
// rand pixel
//thrust::device_vector<int2> rand_coor(SAMPLE_NUM, make_int2(0, 0));
//thrust::counting_iterator<int> i0(58);
//thrust::transform(i0, i0 + SAMPLE_NUM, rand_coor.begin(), intwhprg(mDim.x, mDim.y));
// rand sample
Timer timer;
timer.Reset(); timer.Start();
randSample(&mCoor, &mValue, marker_d, mDim, markerDim);
cudaDeviceSynchronize();
timer.Pause();
cout << "GPU: " << timer.get_count() << " ns." << endl;
//ofstream outFile("outCuda.txt");
//if (!outFile)
// return 0;
//for (int i = 0; i < SAMPLE_NUM; i++) {
// float4 f4 = mValue[i];
// float2 f2 = mCoor[i];
// outFile << f4.x << " " << f4.y << " " << f4.z << " " << f4.w << " ";
// outFile << f2.x << " " << f2.y << endl;
//}
//outFile.close();
//cudaFree((void*) mCoor);
//cudaFree((void*) mValue);
// initialize constant memory
float2 *coor2 = new float2[SAMPLE_NUM];
float4 *value = new float4[SAMPLE_NUM];
timer.Reset(); timer.Start();
for (int i = 0; i < SAMPLE_NUM; i++) {
//int2 xy = rand_coor[i];
int x = rand() % mDim.x;
int y = rand() % mDim.y;
Vec3f YCrCb = marker.at<Vec3f>(y, x);
value[i] = make_float4(YCrCb[0], YCrCb[1], YCrCb[2], 0);
coor2[i].x = (2 * float(x) - mDim.x) / mDim.x * markerDim.x;
coor2[i].y = -(2 * float(y) - mDim.y) / mDim.y * markerDim.y;
}
cudaMemcpyToSymbol(const_Mcoor, coor2, sizeof(float2)* SAMPLE_NUM);
cudaMemcpyToSymbol(const_marker, value, sizeof(float4)* SAMPLE_NUM);
cudaDeviceSynchronize();
timer.Pause();
cout << "CPU: " << timer.get_count() << " ns." << endl;
// for checking...
//ofstream outFile1("out.txt");
//if (!outFile1)
// return 0;
//for (int i = 0; i < SAMPLE_NUM; i++) {
// float4 f4 = value[i];
// float2 f2 = coor2[i];
// outFile1 << f4.x << " " << f4.y << " " << f4.z << " " << f4.w << " ";
// outFile1 << f2.x << " " << f2.y << endl;
//}
//outFile1.close();
delete[] coor2;
delete[] value;
cudaDeviceSynchronize();
cout << cudaGetErrorString(cudaGetLastError()) << endl;
return 0;
}
|
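The file above stages sampled data into __constant__ memory two ways: device-to-device from Thrust vectors (with an explicit byte offset and DeviceToDevice copy kind) and directly from host arrays. Here is a minimal hedged sketch of the host-to-constant path and a kernel that reads the symbol; the names are hypothetical and not part of the file above.

#include <cstdio>
#include <cuda_runtime.h>

__constant__ float c_coeff[4];   // broadcast-read by every thread

__global__ void scale(const float* in, float* out, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = in[i] * c_coeff[i % 4];
}

int main() {
    const int N = 8;
    float h_in[N], h_out[N];
    for (int i = 0; i < N; ++i) h_in[i] = 1.0f;
    const float h_coeff[4] = {1.f, 2.f, 3.f, 4.f};
    // Host -> constant memory; the device-to-device variant used above
    // additionally passes an offset and cudaMemcpyDeviceToDevice.
    cudaMemcpyToSymbol(c_coeff, h_coeff, sizeof(h_coeff));
    float *d_in, *d_out;
    cudaMalloc((void**)&d_in, N * sizeof(float));
    cudaMalloc((void**)&d_out, N * sizeof(float));
    cudaMemcpy(d_in, h_in, N * sizeof(float), cudaMemcpyHostToDevice);
    scale<<<1, N>>>(d_in, d_out, N);
    cudaMemcpy(h_out, d_out, N * sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < N; ++i) printf("%g ", h_out[i]);  // 1 2 3 4 1 2 3 4
    printf("\n");
    cudaFree(d_in); cudaFree(d_out);
    return 0;
}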
52a16783281520da64175ed6b9894739712aa54a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "criterion/backend/cuda/kernels/ViterbiPath.cuh"
#include <cassert>
#include <cfloat>
#include <hipcub/hipcub.hpp>
namespace {
constexpr int kBlockSize = 128;
__global__ void viterbiStep(
int N,
const float* trans,
const float* alpha_tp,
float* alpha_t,
int* beta_t) {
using BlockReduce =
hipcub::BlockReduce<hipcub::KeyValuePair<int, float>, kBlockSize>;
__shared__ typename BlockReduce::TempStorage tempStorage;
assert(blockDim.x == kBlockSize);
hipcub::KeyValuePair<int, float> threadMax(-1, -FLT_MAX);
for (int i = threadIdx.x; i < N; i += blockDim.x) {
int k = blockIdx.x * N + i;
float val = trans[k % (N * N)] + alpha_tp[k / (N * N) * N + i];
if (val > threadMax.value) {
threadMax.key = i;
threadMax.value = val;
}
}
auto result = BlockReduce(tempStorage).Reduce(threadMax, hipcub::ArgMax());
if (threadIdx.x == 0) {
alpha_t[blockIdx.x] += result.value;
beta_t[blockIdx.x] = result.key;
}
}
} // namespace
namespace w2l {
namespace cuda {
/**
* equivalent arrayfire implementation (for batchsize 1):
// pre: alpha = input
array maxvals;
array maxidxs;
for (int t = 1; t < T; ++t) {
max(maxvals, maxidxs, trans + tile(_alpha(span, t - 1), 1, N), 0);
_alpha(span, t) += moddims(maxvals, N);
_beta(span, t) = moddims(maxidxs, N);
}
*/
int viterbiPath(
int T,
int B,
int N,
const float* trans,
float* alpha,
int* beta,
hipStream_t stream) {
for (int t = 1; t < T; ++t) {
hipLaunchKernelGGL(( viterbiStep), dim3(B * N), dim3(kBlockSize), 0, stream,
N, trans, alpha + (t - 1) * B * N, alpha + t * B * N, beta + t * B * N);
}
return 0;
}
} // namespace cuda
} // namespace w2l
|
52a16783281520da64175ed6b9894739712aa54a.cu
|
#include "criterion/backend/cuda/kernels/ViterbiPath.cuh"
#include <cassert>
#include <cfloat>
#include <cub/cub.cuh>
namespace {
constexpr int kBlockSize = 128;
__global__ void viterbiStep(
int N,
const float* trans,
const float* alpha_tp,
float* alpha_t,
int* beta_t) {
using BlockReduce =
cub::BlockReduce<cub::KeyValuePair<int, float>, kBlockSize>;
__shared__ typename BlockReduce::TempStorage tempStorage;
assert(blockDim.x == kBlockSize);
cub::KeyValuePair<int, float> threadMax(-1, -FLT_MAX);
for (int i = threadIdx.x; i < N; i += blockDim.x) {
int k = blockIdx.x * N + i;
float val = trans[k % (N * N)] + alpha_tp[k / (N * N) * N + i];
if (val > threadMax.value) {
threadMax.key = i;
threadMax.value = val;
}
}
auto result = BlockReduce(tempStorage).Reduce(threadMax, cub::ArgMax());
if (threadIdx.x == 0) {
alpha_t[blockIdx.x] += result.value;
beta_t[blockIdx.x] = result.key;
}
}
} // namespace
namespace w2l {
namespace cuda {
/**
* equivalent arrayfire implementation (for batchsize 1):
// pre: alpha = input
array maxvals;
array maxidxs;
for (int t = 1; t < T; ++t) {
max(maxvals, maxidxs, trans + tile(_alpha(span, t - 1), 1, N), 0);
_alpha(span, t) += moddims(maxvals, N);
_beta(span, t) = moddims(maxidxs, N);
}
*/
int viterbiPath(
int T,
int B,
int N,
const float* trans,
float* alpha,
int* beta,
cudaStream_t stream) {
for (int t = 1; t < T; ++t) {
viterbiStep<<<B * N, kBlockSize, 0, stream>>>(
N, trans, alpha + (t - 1) * B * N, alpha + t * B * N, beta + t * B * N);
}
return 0;
}
} // namespace cuda
} // namespace w2l
|
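The core of viterbiStep above is a block-wide argmax: each thread scans a strided slice keeping its local best (index, value) pair, then cub::BlockReduce combines the pairs with cub::ArgMax. The following standalone hedged sketch isolates just that pattern, finding the argmax of one array with a single block:

#include <cstdio>
#include <cfloat>
#include <cuda_runtime.h>
#include <cub/cub.cuh>

constexpr int kBlockSize = 128;

__global__ void blockArgmax(const float* data, int n, int* out_idx) {
    using BlockReduce = cub::BlockReduce<cub::KeyValuePair<int, float>, kBlockSize>;
    __shared__ typename BlockReduce::TempStorage tempStorage;
    cub::KeyValuePair<int, float> best(-1, -FLT_MAX);
    for (int i = threadIdx.x; i < n; i += blockDim.x) {
        if (data[i] > best.value) { best.key = i; best.value = data[i]; }
    }
    // ArgMax keeps the pair with the larger value; the result is valid in thread 0 only.
    auto result = BlockReduce(tempStorage).Reduce(best, cub::ArgMax());
    if (threadIdx.x == 0) *out_idx = result.key;
}

int main() {
    const int N = 300;
    float h[N];
    for (int i = 0; i < N; ++i) h[i] = (float)i;
    h[42] = 1000.0f;                        // plant an unambiguous maximum
    float* d; int* d_idx; int h_idx = -1;
    cudaMalloc((void**)&d, N * sizeof(float));
    cudaMalloc((void**)&d_idx, sizeof(int));
    cudaMemcpy(d, h, N * sizeof(float), cudaMemcpyHostToDevice);
    blockArgmax<<<1, kBlockSize>>>(d, N, d_idx);
    cudaMemcpy(&h_idx, d_idx, sizeof(int), cudaMemcpyDeviceToHost);
    printf("argmax index: %d\n", h_idx);    // 42
    cudaFree(d); cudaFree(d_idx);
    return 0;
}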
df69c6a715f20110ce45c1b844ef1df4c91d9805.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/st_layer.hpp"
#include "caffe/util/benchmark.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SpatialTransformerForwardGPU(const int nthreads, int N, int C,
int output_H_, int output_W_, int H, int W,
Dtype* input_grid_data, const Dtype* U, Dtype* V) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int t = index % output_W_;
const int s = (index / output_W_) % output_H_;
const int j = (index / (output_W_ * output_H_)) % C;
const int i = index / (output_W_ * output_H_ * C);
Dtype* coordinates = input_grid_data + (output_H_ * output_W_ * 2) * i;
const int row_idx = output_W_ * s + t;
const Dtype px = coordinates[row_idx * 2];
const Dtype py = coordinates[row_idx * 2 + 1];
const int V_offset = i * (C * output_H_ * output_W_) + j * (output_H_ * output_W_)
+ s * output_W_ + t;
V[V_offset] = (Dtype)0.;
const Dtype x = (px + 1) / 2 * H;
const Dtype y = (py + 1) / 2 * W;
int m, n; Dtype w;
const Dtype* pic = U + i * (C * H * W) + j * (H * W);
m = floor(x); n = floor(y); w = 0;
if(m >= 0 && m < H && n >= 0 && n < W) {
w = (1 - (x - m)) * (1 - (y - n));
V[V_offset] += w * pic[m * W + n];
}
m = floor(x) + 1; n = floor(y); w = 0;
if(m >= 0 && m < H && n >= 0 && n < W) {
w = (1 - (m - x)) * (1 - (y - n));
V[V_offset] += w * pic[m * W + n];
}
m = floor(x); n = floor(y) + 1; w = 0;
if(m >= 0 && m < H && n >= 0 && n < W) {
w = (1 - (x - m)) * (1 - (n - y));
V[V_offset] += w * pic[m * W + n];
}
m = floor(x) + 1; n = floor(y) + 1; w = 0;
if(m >= 0 && m < H && n >= 0 && n < W) {
w = (1 - (m - x)) * (1 - (n - y));
V[V_offset] += w * pic[m * W + n];
}
}
}
template <typename Dtype>
void SpatialTransformerLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
string prefix = "SpatialTransformerLayer::Forward_gpu::\t";
const Dtype* U = bottom[0]->gpu_data();
const Dtype* theta = bottom[1]->gpu_data();
const Dtype* output_grid_data = output_grid->gpu_data();
Dtype* input_grid_data = input_grid->mutable_gpu_data();
Dtype* V = top[0]->mutable_gpu_data();
caffe_gpu_set(input_grid->count(), (Dtype)0, input_grid_data);
caffe_gpu_set(top[0]->count(), (Dtype)0, V);
// compute input_grid_data
for(int i = 0; i < N; ++i) {
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, output_H_ * output_W_, 2, 3, (Dtype)1.,
output_grid_data, theta + 6 * i, (Dtype)0.,
input_grid_data + (output_H_ * output_W_ * 2) * i);
}
const int nthreads = N * C * output_H_ * output_W_;
hipLaunchKernelGGL(( SpatialTransformerForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3( CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, N, C, output_H_, output_W_, H, W, input_grid_data, U, V);
}
template <typename Dtype>
__global__ void SpatialTransformerBackwardGPU(const int nthreads, int C,
int output_H_, int output_W_, int H, int W,
const Dtype* input_grid_data, const Dtype* dV_array, const Dtype* U_array,
Dtype* dU_tmp_diff, Dtype* dTheta_tmp_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int t = index % output_W_;
const int s = (index / output_W_) % output_H_;
const int j = (index / (output_W_ * output_H_)) % C;
const int i = index / (output_W_ * output_H_ * C);
const Dtype* coordinates = input_grid_data + (output_H_ * output_W_ * 2) * i;
const int row_idx = output_W_ * s + t;
const Dtype px = coordinates[row_idx * 2];
const Dtype py = coordinates[row_idx * 2 + 1];
Dtype delta_dpx = (Dtype)0.;
Dtype delta_dpy = (Dtype)0.;
const Dtype x = (px + 1) / 2 * H;
const Dtype y = (py + 1) / 2 * W;
const int dV_offset = i * (C * output_H_ * output_W_) + j * (output_H_ * output_W_)
+ s * output_W_ + t;
const int dU_tmp_diff_offset = i * (C * H * W) + j * (H * W);
const Dtype dV = dV_array[dV_offset];
int m, n; Dtype w;
const Dtype* U = U_array + i * (C * H * W) + j * (H * W);
// left-bottom neighbor
m = floor(x); n = floor(y); w = 0;
if(m >= 0 && m < H && n >= 0 && n < W) {
w = (1 - (x - m)) * (1 - (y - n));
int tmp_offset = (dU_tmp_diff_offset + m * W + n) * (output_H_ * output_W_) + row_idx;
dU_tmp_diff[tmp_offset] += w * dV;
delta_dpx -= (1 - (y - n)) * U[m * W + n] * dV * H / 2;
delta_dpy -= (1 - (x - m)) * U[m * W + n] * dV * W / 2;
}
// left-top neighbor
m = floor(x); n = floor(y) + 1; w = 0;
if(m >= 0 && m < H && n >= 0 && n < W) {
w = (1 - (x - m)) * (1 - (n - y));
int tmp_offset = (dU_tmp_diff_offset + m * W + n) * (output_H_ * output_W_) + row_idx;
dU_tmp_diff[tmp_offset] += w * dV;
delta_dpx -= (1 - (n - y)) * U[m * W + n] * dV * H / 2;
delta_dpy += (1 - (x - m)) * U[m * W + n] * dV * W / 2;
}
// right-bottom neighbor
m = floor(x) + 1; n = floor(y); w = 0;
if(m >= 0 && m < H && n >= 0 && n < W) {
w = (1 - (m - x)) * (1 - (y - n));
int tmp_offset = (dU_tmp_diff_offset + m * W + n) * (output_H_ * output_W_) + row_idx;
dU_tmp_diff[tmp_offset] += w * dV;
delta_dpx += (1 - (y - n)) * U[m * W + n] * dV * H / 2;
delta_dpy -= (1 - (m - x)) * U[m * W + n] * dV * W / 2;
}
// right-top neighbor
m = floor(x) + 1; n = floor(y) + 1; w = 0;
if(m >= 0 && m < H && n >= 0 && n < W) {
w = (1 - (m - x)) * (1 - (n - y));
int tmp_offset = (dU_tmp_diff_offset + m * W + n) * (output_H_ * output_W_) + row_idx;
dU_tmp_diff[tmp_offset] += w * dV;
delta_dpx += (1 - (n - y)) * U[m * W + n] * dV * H / 2;
delta_dpy += (1 - (m - x)) * U[m * W + n] * dV * W / 2;
}
int idx = j * (output_H_ * output_W_) + s * output_W_ + t;
dTheta_tmp_diff[(6 * i) * (output_H_ * output_W_ * C) + idx] += delta_dpx * (s * 1.0 / output_H_ * 2 - 1);
dTheta_tmp_diff[(6 * i + 1) * (output_H_ * output_W_ * C) + idx] += delta_dpx * (t * 1.0 / output_W_ * 2 - 1);
dTheta_tmp_diff[(6 * i + 2) * (output_H_ * output_W_ * C) + idx] += delta_dpx;
dTheta_tmp_diff[(6 * i + 3) * (output_H_ * output_W_ * C) + idx] += delta_dpy * (s * 1.0 / output_H_ * 2 - 1);
dTheta_tmp_diff[(6 * i + 4) * (output_H_ * output_W_ * C) + idx] += delta_dpy * (t * 1.0 / output_W_ * 2 - 1);
dTheta_tmp_diff[(6 * i + 5) * (output_H_ * output_W_ * C) + idx] += delta_dpy;
}
}
template <typename Dtype>
void SpatialTransformerLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
string prefix = "SpatialTransformerLayer::Backward_GPU::\t";
const Dtype* dV = top[0]->gpu_diff();
const Dtype* input_grid_data = input_grid->gpu_data();
const Dtype* U = bottom[0]->gpu_data();
Dtype* dU = bottom[0]->mutable_gpu_diff();
Dtype* dTheta = bottom[1]->mutable_gpu_diff();
Dtype* dU_tmp_diff = dU_tmp->mutable_gpu_diff();
Dtype* dTheta_tmp_diff = dTheta_tmp->mutable_gpu_diff();
caffe_gpu_set(dU_tmp->count(), (Dtype)0., dU_tmp_diff);
caffe_gpu_set(dTheta_tmp->count(), (Dtype)0., dTheta_tmp_diff);
const int nthreads = N * C * output_H_ * output_W_;
hipLaunchKernelGGL(( SpatialTransformerBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3( CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, C, output_H_, output_W_, H, W, input_grid_data,
dV, U, dU_tmp_diff, dTheta_tmp_diff);
Dtype* all_ones_1_data = all_ones_1->mutable_gpu_data();
caffe_gpu_set(all_ones_1->count(), (Dtype)1., all_ones_1_data);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, bottom[0]->count(), 1, output_H_ * output_W_,
(Dtype)1., dU_tmp_diff, all_ones_1_data, (Dtype)0., dU);
Dtype* all_ones_2_data = all_ones_2->mutable_gpu_data();
caffe_gpu_set(all_ones_2->count(), (Dtype)1., all_ones_2_data);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, bottom[1]->count(), 1, output_H_ * output_W_ * C,
(Dtype)1., dTheta_tmp_diff, all_ones_2_data, (Dtype)0., dTheta);
}
INSTANTIATE_LAYER_GPU_FUNCS(SpatialTransformerLayer);
} // namespace caffe
|
df69c6a715f20110ce45c1b844ef1df4c91d9805.cu
|
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/st_layer.hpp"
#include "caffe/util/benchmark.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SpatialTransformerForwardGPU(const int nthreads, int N, int C,
int output_H_, int output_W_, int H, int W,
Dtype* input_grid_data, const Dtype* U, Dtype* V) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int t = index % output_W_;
const int s = (index / output_W_) % output_H_;
const int j = (index / (output_W_ * output_H_)) % C;
const int i = index / (output_W_ * output_H_ * C);
Dtype* coordinates = input_grid_data + (output_H_ * output_W_ * 2) * i;
const int row_idx = output_W_ * s + t;
const Dtype px = coordinates[row_idx * 2];
const Dtype py = coordinates[row_idx * 2 + 1];
const int V_offset = i * (C * output_H_ * output_W_) + j * (output_H_ * output_W_)
+ s * output_W_ + t;
V[V_offset] = (Dtype)0.;
const Dtype x = (px + 1) / 2 * H;
const Dtype y = (py + 1) / 2 * W;
int m, n; Dtype w;
const Dtype* pic = U + i * (C * H * W) + j * (H * W);
m = floor(x); n = floor(y); w = 0;
if(m >= 0 && m < H && n >= 0 && n < W) {
w = (1 - (x - m)) * (1 - (y - n));
V[V_offset] += w * pic[m * W + n];
}
m = floor(x) + 1; n = floor(y); w = 0;
if(m >= 0 && m < H && n >= 0 && n < W) {
w = (1 - (m - x)) * (1 - (y - n));
V[V_offset] += w * pic[m * W + n];
}
m = floor(x); n = floor(y) + 1; w = 0;
if(m >= 0 && m < H && n >= 0 && n < W) {
w = (1 - (x - m)) * (1 - (n - y));
V[V_offset] += w * pic[m * W + n];
}
m = floor(x) + 1; n = floor(y) + 1; w = 0;
if(m >= 0 && m < H && n >= 0 && n < W) {
w = (1 - (m - x)) * (1 - (n - y));
V[V_offset] += w * pic[m * W + n];
}
}
}
template <typename Dtype>
void SpatialTransformerLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
string prefix = "SpatialTransformerLayer::Forward_gpu::\t";
const Dtype* U = bottom[0]->gpu_data();
const Dtype* theta = bottom[1]->gpu_data();
const Dtype* output_grid_data = output_grid->gpu_data();
Dtype* input_grid_data = input_grid->mutable_gpu_data();
Dtype* V = top[0]->mutable_gpu_data();
caffe_gpu_set(input_grid->count(), (Dtype)0, input_grid_data);
caffe_gpu_set(top[0]->count(), (Dtype)0, V);
// compute input_grid_data
for(int i = 0; i < N; ++i) {
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, output_H_ * output_W_, 2, 3, (Dtype)1.,
output_grid_data, theta + 6 * i, (Dtype)0.,
input_grid_data + (output_H_ * output_W_ * 2) * i);
}
const int nthreads = N * C * output_H_ * output_W_;
SpatialTransformerForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, N, C, output_H_, output_W_, H, W, input_grid_data, U, V);
}
template <typename Dtype>
__global__ void SpatialTransformerBackwardGPU(const int nthreads, int C,
int output_H_, int output_W_, int H, int W,
const Dtype* input_grid_data, const Dtype* dV_array, const Dtype* U_array,
Dtype* dU_tmp_diff, Dtype* dTheta_tmp_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int t = index % output_W_;
const int s = (index / output_W_) % output_H_;
const int j = (index / (output_W_ * output_H_)) % C;
const int i = index / (output_W_ * output_H_ * C);
const Dtype* coordinates = input_grid_data + (output_H_ * output_W_ * 2) * i;
const int row_idx = output_W_ * s + t;
const Dtype px = coordinates[row_idx * 2];
const Dtype py = coordinates[row_idx * 2 + 1];
Dtype delta_dpx = (Dtype)0.;
Dtype delta_dpy = (Dtype)0.;
const Dtype x = (px + 1) / 2 * H;
const Dtype y = (py + 1) / 2 * W;
const int dV_offset = i * (C * output_H_ * output_W_) + j * (output_H_ * output_W_)
+ s * output_W_ + t;
const int dU_tmp_diff_offset = i * (C * H * W) + j * (H * W);
const Dtype dV = dV_array[dV_offset];
int m, n; Dtype w;
const Dtype* U = U_array + i * (C * H * W) + j * (H * W);
// left-bottom neighbor
m = floor(x); n = floor(y); w = 0;
if(m >= 0 && m < H && n >= 0 && n < W) {
w = (1 - (x - m)) * (1 - (y - n));
int tmp_offset = (dU_tmp_diff_offset + m * W + n) * (output_H_ * output_W_) + row_idx;
dU_tmp_diff[tmp_offset] += w * dV;
delta_dpx -= (1 - (y - n)) * U[m * W + n] * dV * H / 2;
delta_dpy -= (1 - (x - m)) * U[m * W + n] * dV * W / 2;
}
// left-top neighbor
m = floor(x); n = floor(y) + 1; w = 0;
if(m >= 0 && m < H && n >= 0 && n < W) {
w = (1 - (x - m)) * (1 - (n - y));
int tmp_offset = (dU_tmp_diff_offset + m * W + n) * (output_H_ * output_W_) + row_idx;
dU_tmp_diff[tmp_offset] += w * dV;
delta_dpx -= (1 - (n - y)) * U[m * W + n] * dV * H / 2;
delta_dpy += (1 - (x - m)) * U[m * W + n] * dV * W / 2;
}
// right-bottom neighbor
m = floor(x) + 1; n = floor(y); w = 0;
if(m >= 0 && m < H && n >= 0 && n < W) {
w = (1 - (m - x)) * (1 - (y - n));
int tmp_offset = (dU_tmp_diff_offset + m * W + n) * (output_H_ * output_W_) + row_idx;
dU_tmp_diff[tmp_offset] += w * dV;
delta_dpx += (1 - (y - n)) * U[m * W + n] * dV * H / 2;
delta_dpy -= (1 - (m - x)) * U[m * W + n] * dV * W / 2;
}
// right-top neighbor
m = floor(x) + 1; n = floor(y) + 1; w = 0;
if(m >= 0 && m < H && n >= 0 && n < W) {
w = (1 - (m - x)) * (1 - (n - y));
int tmp_offset = (dU_tmp_diff_offset + m * W + n) * (output_H_ * output_W_) + row_idx;
dU_tmp_diff[tmp_offset] += w * dV;
delta_dpx += (1 - (n - y)) * U[m * W + n] * dV * H / 2;
delta_dpy += (1 - (m - x)) * U[m * W + n] * dV * W / 2;
}
int idx = j * (output_H_ * output_W_) + s * output_W_ + t;
dTheta_tmp_diff[(6 * i) * (output_H_ * output_W_ * C) + idx] += delta_dpx * (s * 1.0 / output_H_ * 2 - 1);
dTheta_tmp_diff[(6 * i + 1) * (output_H_ * output_W_ * C) + idx] += delta_dpx * (t * 1.0 / output_W_ * 2 - 1);
dTheta_tmp_diff[(6 * i + 2) * (output_H_ * output_W_ * C) + idx] += delta_dpx;
dTheta_tmp_diff[(6 * i + 3) * (output_H_ * output_W_ * C) + idx] += delta_dpy * (s * 1.0 / output_H_ * 2 - 1);
dTheta_tmp_diff[(6 * i + 4) * (output_H_ * output_W_ * C) + idx] += delta_dpy * (t * 1.0 / output_W_ * 2 - 1);
dTheta_tmp_diff[(6 * i + 5) * (output_H_ * output_W_ * C) + idx] += delta_dpy;
}
}
template <typename Dtype>
void SpatialTransformerLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
string prefix = "SpatialTransformerLayer::Backward_GPU::\t";
const Dtype* dV = top[0]->gpu_diff();
const Dtype* input_grid_data = input_grid->gpu_data();
const Dtype* U = bottom[0]->gpu_data();
Dtype* dU = bottom[0]->mutable_gpu_diff();
Dtype* dTheta = bottom[1]->mutable_gpu_diff();
Dtype* dU_tmp_diff = dU_tmp->mutable_gpu_diff();
Dtype* dTheta_tmp_diff = dTheta_tmp->mutable_gpu_diff();
caffe_gpu_set(dU_tmp->count(), (Dtype)0., dU_tmp_diff);
caffe_gpu_set(dTheta_tmp->count(), (Dtype)0., dTheta_tmp_diff);
const int nthreads = N * C * output_H_ * output_W_;
SpatialTransformerBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, C, output_H_, output_W_, H, W, input_grid_data,
dV, U, dU_tmp_diff, dTheta_tmp_diff);
Dtype* all_ones_1_data = all_ones_1->mutable_gpu_data();
caffe_gpu_set(all_ones_1->count(), (Dtype)1., all_ones_1_data);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, bottom[0]->count(), 1, output_H_ * output_W_,
(Dtype)1., dU_tmp_diff, all_ones_1_data, (Dtype)0., dU);
Dtype* all_ones_2_data = all_ones_2->mutable_gpu_data();
caffe_gpu_set(all_ones_2->count(), (Dtype)1., all_ones_2_data);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, bottom[1]->count(), 1, output_H_ * output_W_ * C,
(Dtype)1., dTheta_tmp_diff, all_ones_2_data, (Dtype)0., dTheta);
}
INSTANTIATE_LAYER_GPU_FUNCS(SpatialTransformerLayer);
} // namespace caffe
|
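As a cross-check of the four-neighbor weights in SpatialTransformerForwardGPU above, here is a hedged CPU reference of bilinear sampling at one (x, y) location. It is a standalone sketch, not Caffe code, and uses 1 - |x - m| to cover both the floor(x) and floor(x) + 1 weight forms written out in the kernel.

#include <cmath>
#include <cstdio>

// CPU reference of the bilinear sample computed per output pixel above:
// weight (1 - |x - m|)(1 - |y - n|) over the four integer neighbors of (x, y).
float bilinear(const float* pic, int H, int W, float x, float y) {
    float v = 0.f;
    int m0 = (int)std::floor(x), n0 = (int)std::floor(y);
    for (int m = m0; m <= m0 + 1; ++m)
        for (int n = n0; n <= n0 + 1; ++n)
            if (m >= 0 && m < H && n >= 0 && n < W)
                v += (1 - std::fabs(x - m)) * (1 - std::fabs(y - n)) * pic[m * W + n];
    return v;
}

int main() {
    float pic[4] = {0.f, 1.f, 2.f, 3.f};                // 2x2 image, row-major
    printf("%f\n", bilinear(pic, 2, 2, 0.5f, 0.5f));    // average of all four -> 1.5
    return 0;
}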
459f6b97a5673fc9d586584116a176429f170f73.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "LinearAlgebraStructs.h"
__global__
void findMaximum_kernel(matrix a_d)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int columnIndex = i;
if (columnIndex < a_d.columns) { // make sure not to write outside the matrix, in case the number of columns is not a multiple of the block size
double largestValue = -1;
int largestIndex = 0;
for (int rowIndex = 0; rowIndex < a_d.rows; ++rowIndex) {
if (a_d.elements[MATRIX_INDEX(rowIndex, columnIndex, a_d)]>largestValue) {
a_d.elements[MATRIX_INDEX(largestIndex, columnIndex, a_d)] = 0;
largestIndex = rowIndex;
largestValue = a_d.elements[MATRIX_INDEX(rowIndex, columnIndex, a_d)];
a_d.elements[MATRIX_INDEX(largestIndex, columnIndex, a_d)] = 1;
}
else
{
a_d.elements[MATRIX_INDEX(rowIndex, columnIndex, a_d)] = 0;
}
}
}
}
/**
*converts each column of the given device matrix, on the GPU, into a one-hot indicator of its maximum element (per-column argmax).
**/
extern "C" void findMaximum_cuda(matrix a_d) {
int N = a_d.columns; //each thread handles one column
findMaximum_kernel << <(N + 255) / 256, 256 >> > (a_d);
}
|
459f6b97a5673fc9d586584116a176429f170f73.cu
|
#include "LinearAlgebraStructs.h"
__global__
void findMaximum_kernel(matrix a_d)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int columnIndex = i;
if (columnIndex < a_d.columns) { // make sure not to write outside the matrix, in case the number of columns is not a multiple of the block size
double largestValue = -1;
int largestIndex = 0;
for (int rowIndex = 0; rowIndex < a_d.rows; ++rowIndex) {
if (a_d.elements[MATRIX_INDEX(rowIndex, columnIndex, a_d)]>largestValue) {
a_d.elements[MATRIX_INDEX(largestIndex, columnIndex, a_d)] = 0;
largestIndex = rowIndex;
largestValue = a_d.elements[MATRIX_INDEX(rowIndex, columnIndex, a_d)];
a_d.elements[MATRIX_INDEX(largestIndex, columnIndex, a_d)] = 1;
}
else
{
a_d.elements[MATRIX_INDEX(rowIndex, columnIndex, a_d)] = 0;
}
}
}
}
/**
*converts each column of the given device matrix, on the GPU, into a one-hot indicator of its maximum element (per-column argmax).
**/
extern "C" void findMaximum_cuda(matrix a_d) {
int N = a_d.columns; //each thread handles one column
findMaximum_kernel << <(N + 255) / 256, 256 >> > (a_d);
}
|
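Below is a hedged host-side driver for findMaximum_cuda; the matrix struct and MATRIX_INDEX macro live in LinearAlgebraStructs.h, which is not shown, so a row-major {rows, columns, elements} layout is assumed here. Note that the kernel's largestValue = -1 initialization implicitly assumes each column contains at least one entry greater than -1.

#include <cstdio>
#include <cuda_runtime.h>

// Assumed shape of the struct from LinearAlgebraStructs.h (not shown above).
struct matrix { int rows; int columns; double* elements; };

extern "C" void findMaximum_cuda(matrix a_d);

int main() {
    // 3x2 row-major matrix: column 0 peaks in row 1, column 1 peaks in row 0.
    double h[6] = {0.2, 0.9,
                   0.7, 0.1,
                   0.4, 0.5};
    matrix a_d; a_d.rows = 3; a_d.columns = 2;
    cudaMalloc((void**)&a_d.elements, sizeof(h));
    cudaMemcpy(a_d.elements, h, sizeof(h), cudaMemcpyHostToDevice);
    findMaximum_cuda(a_d);             // each column becomes one-hot at its maximum
    cudaMemcpy(h, a_d.elements, sizeof(h), cudaMemcpyDeviceToHost);
    for (int r = 0; r < 3; ++r)
        printf("%g %g\n", h[r * 2], h[r * 2 + 1]);  // 0 1 / 1 0 / 0 0
    cudaFree(a_d.elements);
    return 0;
}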
5c61cfe666512d363d6bae7d16e02a96cc04b045.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2023 CMU, Facebook, LANL, MIT, NVIDIA, and Stanford (alphabetical)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "flexflow/ops/gather.h"
#include "flexflow/ops/kernels/gather_kernels.h"
#include "flexflow/utils/cuda_helper.h"
namespace FlexFlow {
// declare Legion names
using Legion::coord_t;
using Legion::Domain;
GatherMeta::GatherMeta(FFHandler handler, Gather const *gather)
: OpMeta(handler, gather) {
legion_dim = gather->legion_dim;
}
namespace Kernels {
namespace Gather {
void forward_kernel_wrapper(GatherMeta const *m,
GenericTensorAccessorR const &input,
GenericTensorAccessorR const &index,
GenericTensorAccessorW const &output) {
hipStream_t stream;
checkCUDA(get_legion_stream(&stream));
coord_t stride = 1;
for (int i = 0; i < m->legion_dim; i++) {
stride *= (output.domain.hi()[i] - output.domain.lo()[i] + 1);
}
coord_t output_dim_size =
output.domain.hi()[m->legion_dim] - output.domain.lo()[m->legion_dim] + 1;
coord_t input_dim_size =
input.domain.hi()[m->legion_dim] - input.domain.lo()[m->legion_dim] + 1;
if (index.data_type == DT_INT32) {
Internal::forward_kernel(input.get_float_ptr(),
index.get_int32_ptr(),
output.get_float_ptr(),
output.domain.get_volume(),
stride,
input_dim_size,
output_dim_size,
stream);
} else {
assert(index.data_type == DT_INT64);
Internal::forward_kernel(input.get_float_ptr(),
index.get_int64_ptr(),
output.get_float_ptr(),
output.domain.get_volume(),
stride,
input_dim_size,
output_dim_size,
stream);
}
}
void backward_kernel_wrapper(GatherMeta const *m,
GenericTensorAccessorR const &output_grad,
GenericTensorAccessorR const &index,
GenericTensorAccessorW const &input_grad) {
hipStream_t stream;
checkCUDA(get_legion_stream(&stream));
coord_t stride = 1;
for (int i = 0; i < m->legion_dim; i++) {
stride *= (output_grad.domain.hi()[i] - output_grad.domain.lo()[i] + 1);
}
coord_t output_dim_size = output_grad.domain.hi()[m->legion_dim] -
output_grad.domain.lo()[m->legion_dim] + 1;
coord_t input_dim_size = input_grad.domain.hi()[m->legion_dim] -
input_grad.domain.lo()[m->legion_dim] + 1;
if (index.data_type == DT_INT32) {
Internal::backward_kernel(output_grad.get_float_ptr(),
index.get_int32_ptr(),
input_grad.get_float_ptr(),
output_grad.domain.get_volume(),
stride,
input_dim_size,
output_dim_size,
stream);
} else {
assert(index.data_type == DT_INT64);
Internal::backward_kernel(output_grad.get_float_ptr(),
index.get_int64_ptr(),
input_grad.get_float_ptr(),
output_grad.domain.get_volume(),
stride,
input_dim_size,
output_dim_size,
stream);
}
}
namespace Internal {
template <typename IndexType>
__global__ void gather_forward(float const *input,
IndexType const *index,
float *output,
coord_t output_size,
coord_t stride,
coord_t input_dim_size,
coord_t output_dim_size) {
CUDA_KERNEL_LOOP(o, output_size) {
// output tensor shape: [*, output_dim_size, stride]
// output tensor stride: [output_dim_size * stride, stride, 1]
// output tensor index: [outter_index, index_2, left_over]
// input tensor shape: [*, input_dim_size, stride]
// input tensor stride: [input_dim_size * stride, stride, 1]
// the index of the corresponding input tensor should be:
// [outter_index, index[0], left_over]
// Therefore, input_index = outter_index * (stride * input_dim_size)
// + index[0] * stride + left_over;
coord_t outter_index = o / (stride * output_dim_size);
// coord_t index_2 = (o / stride) % dim_size
coord_t left_over = o % stride;
coord_t input_idx = outter_index * (stride * input_dim_size) +
index[o] * stride + left_over;
output[o] = input[input_idx];
}
}
template <typename IndexType>
void forward_kernel(float const *input_ptr,
IndexType const *index_ptr,
float *output_ptr,
coord_t output_size,
coord_t stride,
coord_t input_dim_size,
coord_t output_dim_size,
hipStream_t stream) {
assert(input_ptr != nullptr);
assert(index_ptr != nullptr);
assert(output_ptr != nullptr);
hipLaunchKernelGGL(( gather_forward<IndexType>)
, dim3(GET_BLOCKS(output_size)), dim3(CUDA_NUM_THREADS), 0, stream,
input_ptr,
index_ptr,
output_ptr,
output_size,
stride,
input_dim_size,
output_dim_size);
}
template <typename IndexType>
__global__ void gather_backward(float const *output_grad,
IndexType const *index,
float *input_grad,
coord_t output_size,
coord_t stride,
coord_t input_dim_size,
coord_t output_dim_size) {
CUDA_KERNEL_LOOP(o, output_size) {
// output tensor shape: [*, output_dim_size, stride]
// output tensor stride: [output_dim_size * stride, stride, 1]
// output tensor index: [outter_index, index_2, left_over]
// input tensor shape: [*, input_dim_size, stride]
// input tensor stride: [input_dim_size * stride, stride, 1]
// the index of the corresponding input tensor should be:
// [outter_index, index[0], left_over]
// Therefore, input_index = outter_index * (stride * input_dim_size)
// + index[0] * stride + left_over;
coord_t outter_index = o / (stride * output_dim_size);
// coord_t index_2 = (o / stride) % dim_size
coord_t left_over = o % stride;
coord_t input_idx = outter_index * (stride * input_dim_size) +
index[o] * stride + left_over;
atomicAdd(&input_grad[input_idx], output_grad[o]);
}
}
template <typename IndexType>
void backward_kernel(float const *output_grad_ptr,
IndexType const *index_ptr,
float *input_grad_ptr,
coord_t output_size,
coord_t stride,
coord_t input_dim_size,
coord_t output_dim_size,
hipStream_t stream) {
assert(output_grad_ptr != nullptr);
assert(input_grad_ptr != nullptr);
assert(index_ptr != nullptr);
hipLaunchKernelGGL(( gather_backward<IndexType>)
, dim3(GET_BLOCKS(output_size)), dim3(CUDA_NUM_THREADS), 0, stream,
output_grad_ptr,
index_ptr,
input_grad_ptr,
output_size,
stride,
input_dim_size,
output_dim_size);
}
} // namespace Internal
} // namespace Gather
} // namespace Kernels
}; // namespace FlexFlow
|
5c61cfe666512d363d6bae7d16e02a96cc04b045.cu
|
/* Copyright 2023 CMU, Facebook, LANL, MIT, NVIDIA, and Stanford (alphabetical)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "flexflow/ops/gather.h"
#include "flexflow/ops/kernels/gather_kernels.h"
#include "flexflow/utils/cuda_helper.h"
namespace FlexFlow {
// declare Legion names
using Legion::coord_t;
using Legion::Domain;
GatherMeta::GatherMeta(FFHandler handler, Gather const *gather)
: OpMeta(handler, gather) {
legion_dim = gather->legion_dim;
}
namespace Kernels {
namespace Gather {
void forward_kernel_wrapper(GatherMeta const *m,
GenericTensorAccessorR const &input,
GenericTensorAccessorR const &index,
GenericTensorAccessorW const &output) {
cudaStream_t stream;
checkCUDA(get_legion_stream(&stream));
coord_t stride = 1;
for (int i = 0; i < m->legion_dim; i++) {
stride *= (output.domain.hi()[i] - output.domain.lo()[i] + 1);
}
coord_t output_dim_size =
output.domain.hi()[m->legion_dim] - output.domain.lo()[m->legion_dim] + 1;
coord_t input_dim_size =
input.domain.hi()[m->legion_dim] - input.domain.lo()[m->legion_dim] + 1;
if (index.data_type == DT_INT32) {
Internal::forward_kernel(input.get_float_ptr(),
index.get_int32_ptr(),
output.get_float_ptr(),
output.domain.get_volume(),
stride,
input_dim_size,
output_dim_size,
stream);
} else {
assert(index.data_type == DT_INT64);
Internal::forward_kernel(input.get_float_ptr(),
index.get_int64_ptr(),
output.get_float_ptr(),
output.domain.get_volume(),
stride,
input_dim_size,
output_dim_size,
stream);
}
}
void backward_kernel_wrapper(GatherMeta const *m,
GenericTensorAccessorR const &output_grad,
GenericTensorAccessorR const &index,
GenericTensorAccessorW const &input_grad) {
cudaStream_t stream;
checkCUDA(get_legion_stream(&stream));
coord_t stride = 1;
for (int i = 0; i < m->legion_dim; i++) {
stride *= (output_grad.domain.hi()[i] - output_grad.domain.lo()[i] + 1);
}
coord_t output_dim_size = output_grad.domain.hi()[m->legion_dim] -
output_grad.domain.lo()[m->legion_dim] + 1;
coord_t input_dim_size = input_grad.domain.hi()[m->legion_dim] -
input_grad.domain.lo()[m->legion_dim] + 1;
if (index.data_type == DT_INT32) {
Internal::backward_kernel(output_grad.get_float_ptr(),
index.get_int32_ptr(),
input_grad.get_float_ptr(),
output_grad.domain.get_volume(),
stride,
input_dim_size,
output_dim_size,
stream);
} else {
assert(index.data_type == DT_INT64);
Internal::backward_kernel(output_grad.get_float_ptr(),
index.get_int64_ptr(),
input_grad.get_float_ptr(),
output_grad.domain.get_volume(),
stride,
input_dim_size,
output_dim_size,
stream);
}
}
namespace Internal {
template <typename IndexType>
__global__ void gather_forward(float const *input,
IndexType const *index,
float *output,
coord_t output_size,
coord_t stride,
coord_t input_dim_size,
coord_t output_dim_size) {
CUDA_KERNEL_LOOP(o, output_size) {
// output tensor shape: [*, output_dim_size, stride]
// output tensor stride: [output_dim_size * stride, stride, 1]
// output tensor index: [outter_index, index_2, left_over]
// input tensor shape: [*, input_dim_size, stride]
// input tensor stride: [input_dim_size * stride, stride, 1]
// the index of the corresponding input tensor should be:
// [outter_index, index[0], left_over]
// Therefore, input_index = outter_index * (stride * input_dim_size)
// + index[0] * stride + left_over;
coord_t outter_index = o / (stride * output_dim_size);
// coord_t index_2 = (o / stride) % dim_size
coord_t left_over = o % stride;
coord_t input_idx = outter_index * (stride * input_dim_size) +
index[o] * stride + left_over;
output[o] = input[input_idx];
}
}
template <typename IndexType>
void forward_kernel(float const *input_ptr,
IndexType const *index_ptr,
float *output_ptr,
coord_t output_size,
coord_t stride,
coord_t input_dim_size,
coord_t output_dim_size,
cudaStream_t stream) {
assert(input_ptr != nullptr);
assert(index_ptr != nullptr);
assert(output_ptr != nullptr);
gather_forward<IndexType>
<<<GET_BLOCKS(output_size), CUDA_NUM_THREADS, 0, stream>>>(
input_ptr,
index_ptr,
output_ptr,
output_size,
stride,
input_dim_size,
output_dim_size);
}
template <typename IndexType>
__global__ void gather_backward(float const *output_grad,
IndexType const *index,
float *input_grad,
coord_t output_size,
coord_t stride,
coord_t input_dim_size,
coord_t output_dim_size) {
CUDA_KERNEL_LOOP(o, output_size) {
// output tensor shape: [*, output_dim_size, stride]
// output tensor stride: [output_dim_size * stride, stride, 1]
// output tensor index: [outter_index, index_2, left_over]
// input tensor shape: [*, input_dim_size, stride]
// input tensor stride: [input_dim_size * stride, stride, 1]
// the index of the corresponding input tensor should be:
// [outter_index, index[0], left_over]
// Therefore, input_index = outter_index * (stride * input_dim_size)
// + index[0] * stride + left_over;
coord_t outter_index = o / (stride * output_dim_size);
// coord_t index_2 = (o / stride) % dim_size
coord_t left_over = o % stride;
coord_t input_idx = outter_index * (stride * input_dim_size) +
index[o] * stride + left_over;
atomicAdd(&input_grad[input_idx], output_grad[o]);
}
}
template <typename IndexType>
void backward_kernel(float const *output_grad_ptr,
IndexType const *index_ptr,
float *input_grad_ptr,
coord_t output_size,
coord_t stride,
coord_t input_dim_size,
coord_t output_dim_size,
cudaStream_t stream) {
assert(output_grad_ptr != nullptr);
assert(input_grad_ptr != nullptr);
assert(index_ptr != nullptr);
gather_backward<IndexType>
<<<GET_BLOCKS(output_size), CUDA_NUM_THREADS, 0, stream>>>(
output_grad_ptr,
index_ptr,
input_grad_ptr,
output_size,
stride,
input_dim_size,
output_dim_size);
}
} // namespace Internal
} // namespace Gather
} // namespace Kernels
}; // namespace FlexFlow
|
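As a cross-check of the index arithmetic documented inside gather_forward/gather_backward above, here is a hedged CPU reference of the forward gather (a standalone sketch, not FlexFlow code); as in the kernel, the index array has one entry per output element.

#include <cstdio>
#include <vector>

// output[o] = input[outer * (stride * input_dim_size) + index[o] * stride + left_over]
void gather_ref(const std::vector<float>& input, const std::vector<int>& index,
                std::vector<float>& output, long stride,
                long input_dim_size, long output_dim_size) {
    for (size_t o = 0; o < output.size(); ++o) {
        long outer     = (long)o / (stride * output_dim_size);
        long left_over = (long)o % stride;
        long in_idx = outer * (stride * input_dim_size) + index[o] * stride + left_over;
        output[o] = input[in_idx];
    }
}

int main() {
    // input shape [1, 4, 2] (input_dim_size = 4, stride = 2); gather rows {2, 0}
    std::vector<float> input = {0, 1, 2, 3, 4, 5, 6, 7};
    std::vector<int>   index = {2, 2, 0, 0};
    std::vector<float> output(4);
    gather_ref(input, index, output, /*stride=*/2,
               /*input_dim_size=*/4, /*output_dim_size=*/2);
    for (float v : output) printf("%g ", v);   // 4 5 0 1
    printf("\n");
    return 0;
}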
7271aca6737eb6710ec7b194a33ba7bc241d9cf4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../GpuCurve.h"
#include "../GpuDelaunay.h"
#include <iomanip>
#include <iostream>
#include <ctime>
#include "KerCommon.h"
#include "KerDivision.h"
#include "KerPredicates.h"
#include "ThrustWrapper.h"
#define GRID_STRIDE_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
// NOTE: nvcc segfaults if these are included
// #include <CGAL/Delaunay_triangulation_2.h>
// #include <CGAL/Voronoi_diagram_2.h>
__device__ double square(double x) { return x * x; }
__device__ double determinant(double a00, double a01, double a10, double a11) {
return a00 * a11 - a10 * a01;
}
__device__ Point2 circumcenter(double coord[3][2]) {
double dqx = coord[1][0] - coord[0][0];
double drx = coord[2][0] - coord[0][0];
double dqy = coord[1][1] - coord[0][1];
double dry = coord[2][1] - coord[0][1];
double r2 = square(drx) + square(dry);
double q2 = square(dqx) + square(dqy);
double den = 2 * determinant(dqx, dqy, drx, dry);
double dcx = determinant(dry, dqy, r2, q2) / den;
double dcy = -determinant(drx, dqx, r2, q2) / den;
return Point2{{dcx + coord[0][0], dcy + coord[0][1]}};
}
__global__ void DT2VDVertices(KerPoint2Array points, KerTriArray input,
Point2 *output) {
GRID_STRIDE_LOOP(index, input._num) {
const Tri tri = input._arr[index];
double coord[3][2];
for (int i = 0; i < 3; i++) {
assert(tri._v[i] < points._num);
const Point2 point = points._arr[tri._v[i]];
coord[i][0] = point._p[0];
coord[i][1] = point._p[1];
}
output[index] = circumcenter(coord);
}
}
struct isGoodTri {
int infId;
__host__ __device__ bool operator()(const Tri tri) {
for (int i = 0; i < 3; i++) {
if (tri._v[i] >= infId)
return false;
}
return true;
}
};
void extractCrust(int s_range, const TriHVec &input, SegmentHVec &output) {
for (auto it = input.begin(); it != input.end(); it++) {
const Tri tri = *it;
if (tri._v[0] < s_range && tri._v[1] < s_range)
output.push_back(Segment{tri._v[0], tri._v[1]});
if (tri._v[1] < s_range && tri._v[2] < s_range)
output.push_back(Segment{tri._v[1], tri._v[2]});
if (tri._v[2] < s_range && tri._v[0] < s_range)
output.push_back(Segment{tri._v[2], tri._v[0]});
}
}
void GpuCurve::compute(const GCurve2DInput &input, GCurve2DOutput *output) {
// Let S be a finite set of points in the plane.
// move input from CPU to GPU
clock_t time;
time = clock();
_s_points.copyFromHost(input.pointVec);
// std::cout << ((float)(clock() - time))/CLOCKS_PER_SEC << " seconds \t _s_points.copyFromHost(input.pointVec);" << std::endl;
// Let V be the vertices of the Voronoi diagram of S.
// Compute DT
// CPU input → GPU output
GDel2DInputGPU dt1Input{GDel2DInput{}, &_s_points};
GDel2DOutputGPU dt1Output;
time = clock();
_v_gDel.computeGPU(dt1Input, &dt1Output);
// std::cout << ((float)(clock() - time))/CLOCKS_PER_SEC << " seconds \t _v_gDel.computeGPU(dt1Input, &dt1Output);" << std::endl;
// filter out trash triangles
TriDVec goodTris;
time = clock();
goodTris.resize(dt1Output.triVec.size());
auto it = thrust::copy_if(dt1Output.triVec.begin(), dt1Output.triVec.end(),
goodTris.begin(),
isGoodTri{static_cast<int>(_s_points.size())});
goodTris.resize(it - goodTris.begin());
// std::cout << ((float)(clock() - time))/CLOCKS_PER_SEC << " seconds \t resize copy_if resize" << std::endl;
// convert to VD: compute circumcenter of triangles in GPU
_v_points.resize(goodTris.size());
time = clock();
hipLaunchKernelGGL(( DT2VDVertices), dim3(BlocksPerGrid), dim3(ThreadsPerBlock), 0, 0, toKernelArray(_s_points),
toKernelArray(goodTris),
toKernelPtr(_v_points));
// std::cout << ((float)(clock() - time))/CLOCKS_PER_SEC << " seconds \t DT2VDVertices" << std::endl;
CudaCheckError();
// Let D be the Delaunay triangulation of S∪V.
time = clock();
_sv_points.copyFrom2(_s_points, _v_points);
// std::cout << ((float)(clock() - time))/CLOCKS_PER_SEC << " seconds \t _sv_points.copyFrom2(_s_points, _v_points);" << std::endl;
GDel2DInputGPU dt2Input{GDel2DInput{}, &_sv_points};
GDel2DOutputGPU dt2Output;
time = clock();
_sv_gDel.computeGPU(dt2Input, &dt2Output);
// std::cout << ((float)(clock() - time))/CLOCKS_PER_SEC << " seconds \t _sv_gDel.computeGPU(dt2Input, &dt2Output);" << std::endl;
// An edge of D belongs to the crust of S if both its endpoints belong to S
// move to CPU and extract crust
TriHVec suv_tris;
time = clock();
dt2Output.triVec.copyToHost(suv_tris);
extractCrust(_s_points.size(), suv_tris, output->segmentVec);
// std::cout << ((float)(clock() - time))/CLOCKS_PER_SEC << " seconds \t copyToHost, extractCrust" << std::endl;
}
|
7271aca6737eb6710ec7b194a33ba7bc241d9cf4.cu
|
#include "../GpuCurve.h"
#include "../GpuDelaunay.h"
#include <iomanip>
#include <iostream>
#include <ctime>
#include "KerCommon.h"
#include "KerDivision.h"
#include "KerPredicates.h"
#include "ThrustWrapper.h"
#define GRID_STRIDE_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
// NOTE: nvcc segfaults if these are included
// #include <CGAL/Delaunay_triangulation_2.h>
// #include <CGAL/Voronoi_diagram_2.h>
__device__ double square(double x) { return x * x; }
__device__ double determinant(double a00, double a01, double a10, double a11) {
return a00 * a11 - a10 * a01;
}
__device__ Point2 circumcenter(double coord[3][2]) {
double dqx = coord[1][0] - coord[0][0];
double drx = coord[2][0] - coord[0][0];
double dqy = coord[1][1] - coord[0][1];
double dry = coord[2][1] - coord[0][1];
double r2 = square(drx) + square(dry);
double q2 = square(dqx) + square(dqy);
double den = 2 * determinant(dqx, dqy, drx, dry);
double dcx = determinant(dry, dqy, r2, q2) / den;
double dcy = -determinant(drx, dqx, r2, q2) / den;
return Point2{{dcx + coord[0][0], dcy + coord[0][1]}};
}
__global__ void DT2VDVertices(KerPoint2Array points, KerTriArray input,
Point2 *output) {
GRID_STRIDE_LOOP(index, input._num) {
const Tri tri = input._arr[index];
double coord[3][2];
for (int i = 0; i < 3; i++) {
assert(tri._v[i] < points._num);
const Point2 point = points._arr[tri._v[i]];
coord[i][0] = point._p[0];
coord[i][1] = point._p[1];
}
output[index] = circumcenter(coord);
}
}
struct isGoodTri {
int infId;
__host__ __device__ bool operator()(const Tri tri) {
for (int i = 0; i < 3; i++) {
if (tri._v[i] >= infId)
return false;
}
return true;
}
};
void extractCrust(int s_range, const TriHVec &input, SegmentHVec &output) {
for (auto it = input.begin(); it != input.end(); it++) {
const Tri tri = *it;
if (tri._v[0] < s_range && tri._v[1] < s_range)
output.push_back(Segment{tri._v[0], tri._v[1]});
if (tri._v[1] < s_range && tri._v[2] < s_range)
output.push_back(Segment{tri._v[1], tri._v[2]});
if (tri._v[2] < s_range && tri._v[0] < s_range)
output.push_back(Segment{tri._v[2], tri._v[0]});
}
}
void GpuCurve::compute(const GCurve2DInput &input, GCurve2DOutput *output) {
// Let S be a finite set of points in the plane.
// move input from CPU to GPU
clock_t time;
time = clock();
_s_points.copyFromHost(input.pointVec);
// std::cout << ((float)(clock() - time))/CLOCKS_PER_SEC << " seconds \t _s_points.copyFromHost(input.pointVec);" << std::endl;
// Let V be the vertices of the Voronoi diagram of S.
// Compute DT
// CPU input → GPU output
GDel2DInputGPU dt1Input{GDel2DInput{}, &_s_points};
GDel2DOutputGPU dt1Output;
time = clock();
_v_gDel.computeGPU(dt1Input, &dt1Output);
// std::cout << ((float)(clock() - time))/CLOCKS_PER_SEC << " seconds \t _v_gDel.computeGPU(dt1Input, &dt1Output);" << std::endl;
// filter out trash triangles
TriDVec goodTris;
time = clock();
goodTris.resize(dt1Output.triVec.size());
auto it = thrust::copy_if(dt1Output.triVec.begin(), dt1Output.triVec.end(),
goodTris.begin(),
isGoodTri{static_cast<int>(_s_points.size())});
goodTris.resize(it - goodTris.begin());
// std::cout << ((float)(clock() - time))/CLOCKS_PER_SEC << " seconds \t resize copy_if resize" << std::endl;
// convert to VD: compute circumcenter of triangles in GPU
_v_points.resize(goodTris.size());
time = clock();
DT2VDVertices<<<BlocksPerGrid, ThreadsPerBlock>>>(toKernelArray(_s_points),
toKernelArray(goodTris),
toKernelPtr(_v_points));
// std::cout << ((float)(clock() - time))/CLOCKS_PER_SEC << " seconds \t DT2VDVertices" << std::endl;
CudaCheckError();
// Let D be the Delaunay triangulation of S∪V.
time = clock();
_sv_points.copyFrom2(_s_points, _v_points);
// std::cout << ((float)(clock() - time))/CLOCKS_PER_SEC << " seconds \t _sv_points.copyFrom2(_s_points, _v_points);" << std::endl;
GDel2DInputGPU dt2Input{GDel2DInput{}, &_sv_points};
GDel2DOutputGPU dt2Output;
time = clock();
_sv_gDel.computeGPU(dt2Input, &dt2Output);
// std::cout << ((float)(clock() - time))/CLOCKS_PER_SEC << " seconds \t _sv_gDel.computeGPU(dt2Input, &dt2Output);" << std::endl;
// An edge of D belongs to the crust of S if both its endpoints belong to S
// move to CPU and extract crust
TriHVec suv_tris;
time = clock();
dt2Output.triVec.copyToHost(suv_tris);
extractCrust(_s_points.size(), suv_tris, output->segmentVec);
// std::cout << ((float)(clock() - time))/CLOCKS_PER_SEC << " seconds \t copyToHost, extractCrust" << std::endl;
}
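// Usage sketch (illustrative only; the exact GCurve2DInput/GCurve2DOutput
// member set is assumed from this file, not verified against the headers):
//
//   GCurve2DInput in;
//   in.pointVec = samples;        // host vector of curve samples S
//   GCurve2DOutput out;
//   GpuCurve curve;
//   curve.compute(in, &out);
//   // out.segmentVec now holds the crust: edges of DT(S ∪ V) whose
//   // endpoints are both original samples, reconstructing the curve.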
|
e084890fd9c5a4e34b3cbec8674172866bfd5340.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
int main(int argc, char** argv) {
return 0;
}
|
e084890fd9c5a4e34b3cbec8674172866bfd5340.cu
|
#include <stdio.h>
#include <cuda_runtime.h>
int main(int argc, char** argv) {
return 0;
}
|
342247d2117382bf1f6b641fd19bc76028eb09c3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* \file suite.cu
 * \brief GPU (Cuda) and CPU code computing a simple mathematical suite (for 0<i<max_i): Σ(1/n^i)
* \author Adrien Python
* \date 17.02.2017
*/
#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>
#include <math.h>
#include <stdbool.h>
#include <libgen.h>
#ifdef COMPUTE_ON_CPU
// Tweak the code to run on CPU
#define genericMalloc(dst_ptr, type) do { *(dst_ptr) = (type*)malloc(sizeof(type)); } while(0)
#define hipMemcpy(dst, src, size, mode) memcpy(dst, src, size)
#define hipMemcpyToSymbol(dst, src, size) memcpy(&dst, src, size)
#define hipFree(ptr) free(ptr)
#define HANDLE_ERROR(ans) ans
#define HANDLE_KERNEL_ERROR(...) do { __VA_ARGS__; } while(0)
#define RUN_KERNEL(kernel, ...) kernel(__VA_ARGS__)
#else
// Code for GPU usage only
#define genericMalloc(dst_ptr, type) hipMalloc(dst_ptr, sizeof(type))
#define HANDLE_ERROR(ans) (handleError((ans), __FILE__, __LINE__))
inline void handleError(hipError_t code, const char *file, int line)
{
if (code != hipSuccess) {
fprintf(stderr,"CUDA assert: %s %s %d\n", hipGetErrorString(code), file, line);
exit(EXIT_FAILURE);
}
}
#define HANDLE_KERNEL_ERROR(...) \
do { \
__VA_ARGS__; \
HANDLE_ERROR( hipPeekAtLastError() ); \
HANDLE_ERROR( hipDeviceSynchronize() ); \
} while(0)
#define RUN_KERNEL(kernel, ...) HANDLE_KERNEL_ERROR(hipLaunchKernelGGL(( kernel), dim3(1), dim3(1), 0, 0, __VA_ARGS__) )
#endif
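// The peek-then-synchronize pair above catches both immediate launch errors
// (bad configuration, reported by hipPeekAtLastError) and asynchronous errors
// raised while the kernel executes (surfaced by hipDeviceSynchronize).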
#ifndef COMPUTE_ON_CPU
__device__
#endif
double powd(double x, double y)
{
double result = 1;
for (int i = 0; i < y; ++i) {
result *= x;
}
return result;
}
#ifdef COMPUTE_ON_CPU
void suite(double* d_result, long* d_n, size_t* d_i)
#else
__global__ void suite(double* d_result, long* d_n, size_t* d_i)
#endif
{
*d_result += 1.0 / powd(*d_n, *d_i);
}
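// For reference, the accumulated series has a closed form: for n > 1,
//   sum_{i=1}^{max_i-1} (1/n)^i = (1 - n^-(max_i-1)) / (n - 1),
// which converges to 1/(n-1) as max_i grows.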
int main(int argc, char * const argv[])
{
// Read arguments
if (argc != 3) {
fprintf(stderr, "usage: %s <n> <max_i>\n", basename(argv[0]));
return EXIT_FAILURE;
}
long* d_n, n = strtol(argv[1], NULL, 10);
int max_i = strtol(argv[2], NULL, 10);
double *d_result, result = 0;
HANDLE_ERROR(genericMalloc(&d_n, long));
HANDLE_ERROR(hipMemcpy(d_n, &n, sizeof(long), hipMemcpyHostToDevice));
HANDLE_ERROR(genericMalloc(&d_result, double));
HANDLE_ERROR(hipMemcpy(d_result, &result, sizeof(double), hipMemcpyHostToDevice));
size_t* d_i;
HANDLE_ERROR(genericMalloc(&d_i, size_t));
for (size_t i = 1; i < max_i; i++) {
HANDLE_ERROR(hipMemcpy(d_i, &i, sizeof(size_t), hipMemcpyHostToDevice));
RUN_KERNEL(suite, d_result, d_n, d_i);
}
HANDLE_ERROR(hipMemcpy(&result, d_result, sizeof(double), hipMemcpyDeviceToHost));
#ifdef COMPUTE_ON_CPU
printf("on cpu, with 0<i<%d: (1/%lu^i) = %.60f\n", max_i, n, result);
#else
printf("on gpu, with 0<i<%d: (1/%lu^i) = %.60f\n", max_i, n, result);
#endif
    HANDLE_ERROR(hipFree(d_result));
    HANDLE_ERROR(hipFree(d_n));
    HANDLE_ERROR(hipFree(d_i));
return EXIT_SUCCESS;
}
|
342247d2117382bf1f6b641fd19bc76028eb09c3.cu
|
/*!
* \file suite.cu
* \brief GPU (Cuda) and CPU code computing a simple mathematical suite (for 0<i<max_i): Σ(1/n^i)
* \author Adrien Python
* \date 17.02.2017
*/
#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>
#include <math.h>
#include <stdbool.h>
#include <libgen.h>
#ifdef COMPUTE_ON_CPU
// Tweak the code to run on CPU
#define genericMalloc(dst_ptr, type) do { *(dst_ptr) = (type*)malloc(sizeof(type)); } while(0)
#define cudaMemcpy(dst, src, size, mode) memcpy(dst, src, size)
#define cudaMemcpyToSymbol(dst, src, size) memcpy(&dst, src, size)
#define cudaFree(ptr) free(ptr)
#define HANDLE_ERROR(ans) ans
#define HANDLE_KERNEL_ERROR(...) do { __VA_ARGS__; } while(0)
#define RUN_KERNEL(kernel, ...) kernel(__VA_ARGS__)
#else
// Code for GPU usage only
#define genericMalloc(dst_ptr, type) cudaMalloc(dst_ptr, sizeof(type))
#define HANDLE_ERROR(ans) (handleError((ans), __FILE__, __LINE__))
inline void handleError(cudaError_t code, const char *file, int line)
{
if (code != cudaSuccess) {
fprintf(stderr,"CUDA assert: %s %s %d\n", cudaGetErrorString(code), file, line);
exit(EXIT_FAILURE);
}
}
#define HANDLE_KERNEL_ERROR(...) \
do { \
__VA_ARGS__; \
HANDLE_ERROR( cudaPeekAtLastError() ); \
HANDLE_ERROR( cudaDeviceSynchronize() ); \
} while(0)
#define RUN_KERNEL(kernel, ...) HANDLE_KERNEL_ERROR( kernel<<<1, 1>>>(__VA_ARGS__) )
#endif
#ifndef COMPUTE_ON_CPU
__device__
#endif
double powd(double x, double y)
{
double result = 1;
for (int i = 0; i < y; ++i) {
result *= x;
}
return result;
}
#ifdef COMPUTE_ON_CPU
void suite(double* d_result, long* d_n, size_t* d_i)
#else
__global__ void suite(double* d_result, long* d_n, size_t* d_i)
#endif
{
*d_result += 1.0 / powd(*d_n, *d_i);
}
int main(int argc, char * const argv[])
{
// Read arguments
if (argc != 3) {
fprintf(stderr, "usage: %s <n> <max_i>\n", basename(argv[0]));
return EXIT_FAILURE;
}
long* d_n, n = strtol(argv[1], NULL, 10);
int max_i = strtol(argv[2], NULL, 10);
double *d_result, result = 0;
HANDLE_ERROR(genericMalloc(&d_n, long));
HANDLE_ERROR(cudaMemcpy(d_n, &n, sizeof(long), cudaMemcpyHostToDevice));
HANDLE_ERROR(genericMalloc(&d_result, double));
HANDLE_ERROR(cudaMemcpy(d_result, &result, sizeof(double), cudaMemcpyHostToDevice));
size_t* d_i;
HANDLE_ERROR(genericMalloc(&d_i, size_t));
for (size_t i = 1; i < max_i; i++) {
HANDLE_ERROR(cudaMemcpy(d_i, &i, sizeof(size_t), cudaMemcpyHostToDevice));
RUN_KERNEL(suite, d_result, d_n, d_i);
}
HANDLE_ERROR(cudaMemcpy(&result, d_result, sizeof(double), cudaMemcpyDeviceToHost));
#ifdef COMPUTE_ON_CPU
printf("on cpu, with 0<i<%d: Σ(1/%lu^i) = %.60f\n", max_i, n, result);
#else
printf("on gpu, with 0<i<%d: Σ(1/%lu^i) = %.60f\n", max_i, n, result);
#endif
    HANDLE_ERROR(cudaFree(d_result));
    HANDLE_ERROR(cudaFree(d_n));
    HANDLE_ERROR(cudaFree(d_i));
return EXIT_SUCCESS;
}
|
f0ba17a067bc07ac3b6ea6ae679a33f1140470fb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernels_common.h"
__global__ void mul_veff_with_phase_factors_gpu_kernel(int num_gvec_loc__,
hipDoubleComplex const* veff__,
int const* gvec__,
double const* atom_pos__,
double* veff_a__)
{
int igloc = blockDim.x * blockIdx.x + threadIdx.x;
int ia = blockIdx.y;
if (igloc < num_gvec_loc__)
{
int gvx = gvec__[array2D_offset(0, igloc, 3)];
int gvy = gvec__[array2D_offset(1, igloc, 3)];
int gvz = gvec__[array2D_offset(2, igloc, 3)];
double ax = atom_pos__[array2D_offset(0, ia, 3)];
double ay = atom_pos__[array2D_offset(1, ia, 3)];
double az = atom_pos__[array2D_offset(2, ia, 3)];
double p = twopi * (ax * gvx + ay * gvy + az * gvz);
hipDoubleComplex z = cuConj(cuCmul(veff__[igloc], make_cuDoubleComplex(cos(p), sin(p))));
veff_a__[array2D_offset(2 * igloc, ia, 2 * num_gvec_loc__)] = z.x;
veff_a__[array2D_offset(2 * igloc + 1, ia, 2 * num_gvec_loc__)] = z.y;
}
}
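// The kernel forms conj(V(G) * exp(2*pi*i * G . tau_a)) per atom a and local
// G-vector: p is the phase 2*pi*(G . tau_a), and the conjugated complex
// product is stored as interleaved (re, im) doubles in veff_a__.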
extern "C" void mul_veff_with_phase_factors_gpu(int num_atoms__,
int num_gvec_loc__,
hipDoubleComplex const* veff__,
int const* gvec__,
double const* atom_pos__,
double* veff_a__)
{
dim3 grid_t(64);
dim3 grid_b(num_blocks(num_gvec_loc__, grid_t.x), num_atoms__);
hipLaunchKernelGGL(( mul_veff_with_phase_factors_gpu_kernel) , dim3(grid_b), dim3(grid_t), 0, 0,
num_gvec_loc__,
veff__,
gvec__,
atom_pos__,
veff_a__
);
}
|
f0ba17a067bc07ac3b6ea6ae679a33f1140470fb.cu
|
#include "kernels_common.h"
__global__ void mul_veff_with_phase_factors_gpu_kernel(int num_gvec_loc__,
cuDoubleComplex const* veff__,
int const* gvec__,
double const* atom_pos__,
double* veff_a__)
{
int igloc = blockDim.x * blockIdx.x + threadIdx.x;
int ia = blockIdx.y;
if (igloc < num_gvec_loc__)
{
int gvx = gvec__[array2D_offset(0, igloc, 3)];
int gvy = gvec__[array2D_offset(1, igloc, 3)];
int gvz = gvec__[array2D_offset(2, igloc, 3)];
double ax = atom_pos__[array2D_offset(0, ia, 3)];
double ay = atom_pos__[array2D_offset(1, ia, 3)];
double az = atom_pos__[array2D_offset(2, ia, 3)];
double p = twopi * (ax * gvx + ay * gvy + az * gvz);
cuDoubleComplex z = cuConj(cuCmul(veff__[igloc], make_cuDoubleComplex(cos(p), sin(p))));
veff_a__[array2D_offset(2 * igloc, ia, 2 * num_gvec_loc__)] = z.x;
veff_a__[array2D_offset(2 * igloc + 1, ia, 2 * num_gvec_loc__)] = z.y;
}
}
extern "C" void mul_veff_with_phase_factors_gpu(int num_atoms__,
int num_gvec_loc__,
cuDoubleComplex const* veff__,
int const* gvec__,
double const* atom_pos__,
double* veff_a__)
{
dim3 grid_t(64);
dim3 grid_b(num_blocks(num_gvec_loc__, grid_t.x), num_atoms__);
mul_veff_with_phase_factors_gpu_kernel <<<grid_b, grid_t>>>
(
num_gvec_loc__,
veff__,
gvec__,
atom_pos__,
veff_a__
);
}
|
2d72c94afa0dd5e1895386a1929f959f6e896542.hip
|
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/hip/Math.cuh>
#include <ATen/native/hip/jit_utils.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char shifted_chebyshev_polynomial_v_name[] = "shifted_chebyshev_polynomial_v_forward";
void shifted_chebyshev_polynomial_v_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "shifted_chebyshev_polynomial_v_cuda", [&]() {
opmath_jitted_gpu_kernel_with_scalars<shifted_chebyshev_polynomial_v_name, scalar_t, scalar_t>(iterator, shifted_chebyshev_polynomial_v_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "shifted_chebyshev_polynomial_v_cuda", [&]() {
gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {
return shifted_chebyshev_polynomial_v_forward<scalar_t, true>(x, n);
});
});
#endif
} // shifted_chebyshev_polynomial_v_kernel_cuda
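// Note: AT_USE_JITERATOR() selects PyTorch's NVRTC path, where the kernel is
// JIT-compiled at run time from shifted_chebyshev_polynomial_v_string; the
// #else branch is the statically compiled lambda fallback computing the same
// forward.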
} // namespace (anonymous)
REGISTER_DISPATCH(shifted_chebyshev_polynomial_v_stub, &shifted_chebyshev_polynomial_v_kernel_cuda);
} // namespace at::native
|
2d72c94afa0dd5e1895386a1929f959f6e896542.cu
|
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char shifted_chebyshev_polynomial_v_name[] = "shifted_chebyshev_polynomial_v_forward";
void shifted_chebyshev_polynomial_v_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "shifted_chebyshev_polynomial_v_cuda", [&]() {
opmath_jitted_gpu_kernel_with_scalars<shifted_chebyshev_polynomial_v_name, scalar_t, scalar_t>(iterator, shifted_chebyshev_polynomial_v_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "shifted_chebyshev_polynomial_v_cuda", [&]() {
gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {
return shifted_chebyshev_polynomial_v_forward<scalar_t, true>(x, n);
});
});
#endif
} // shifted_chebyshev_polynomial_v_kernel_cuda
} // namespace (anonymous)
REGISTER_DISPATCH(shifted_chebyshev_polynomial_v_stub, &shifted_chebyshev_polynomial_v_kernel_cuda);
} // namespace at::native
|
e9ec597fc6d90b501bfd5d45fe57c0fd6c3c5fbe.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
extern "C" {
#include "convolution.h"
#include "hip/hip_runtime.h"
}
void forward_conv_gpu(conv_layer cl, float *input_gpu) {
float alpha = 1.0f;
CUDNN_CHECK(cudnnConvolutionForward(cudnn_handler(),
&alpha,
cl.inputTensorDesc,
input_gpu,
cl.filterDesc,
cl.weight_gpu,
cl.convDesc,
cl.fw_algo,
cl.workspace_gpu,
cl.workspace_size,
&alpha,
cl.outputTensorDesc,
cl.output_gpu));
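    // Note: cudnnConvolutionForward takes separate alpha/beta blend factors;
    // passing &alpha (1.0f) for both means y = conv(x) + 1.0 * y, i.e. the
    // result is blended with whatever output_gpu already holds. Pass a
    // zero-valued beta instead if plain overwrite semantics are intended.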
if (cl.s_type == BIAS) {
CUDNN_CHECK(cudnnAddTensor(cudnn_handler(),
&alpha,
cl.biasTensorDesc,
cl.bias_gpu,
&alpha,
cl.outputTensorDesc,
cl.output_gpu));
}
if (cl.s_type == BATCH_NORM) {
float one = 1;
float zero = 0;
CUDNN_CHECK(cudnnBatchNormalizationForwardInference(cudnn_handler(),
CUDNN_BATCHNORM_SPATIAL,
&one,
&zero,
cl.outputTensorDesc,
cl.output_gpu,
cl.outputTensorDesc,
cl.output_gpu,
cl.bnTensorDesc,
cl.bn_scale_gpu,
cl.bn_bias_gpu,
cl.bn_result_mean_gpu,
cl.bn_result_varience_gpu,
.00001));
}
if (cl.a_type != NONE_A) {
float one = 1;
float zero = 0;
CUDNN_CHECK(cudnnActivationForward(cudnn_handler(),
cl.activationDesc,
&one,
cl.outputTensorDesc,
cl.output_gpu,
&zero,
cl.outputTensorDesc,
                                            cl.output_gpu));
}
}
|
e9ec597fc6d90b501bfd5d45fe57c0fd6c3c5fbe.cu
|
#include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
extern "C" {
#include "convolution.h"
#include "cuda.h"
}
void forward_conv_gpu(conv_layer cl, float *input_gpu) {
float alpha = 1.0f;
CUDNN_CHECK(cudnnConvolutionForward(cudnn_handler(),
&alpha,
cl.inputTensorDesc,
input_gpu,
cl.filterDesc,
cl.weight_gpu,
cl.convDesc,
cl.fw_algo,
cl.workspace_gpu,
cl.workspace_size,
&alpha,
cl.outputTensorDesc,
cl.output_gpu));
if (cl.s_type == BIAS) {
CUDNN_CHECK(cudnnAddTensor(cudnn_handler(),
&alpha,
cl.biasTensorDesc,
cl.bias_gpu,
&alpha,
cl.outputTensorDesc,
cl.output_gpu));
}
if (cl.s_type == BATCH_NORM) {
float one = 1;
float zero = 0;
CUDNN_CHECK(cudnnBatchNormalizationForwardInference(cudnn_handler(),
CUDNN_BATCHNORM_SPATIAL,
&one,
&zero,
cl.outputTensorDesc,
cl.output_gpu,
cl.outputTensorDesc,
cl.output_gpu,
cl.bnTensorDesc,
cl.bn_scale_gpu,
cl.bn_bias_gpu,
cl.bn_result_mean_gpu,
cl.bn_result_varience_gpu,
.00001));
}
if (cl.a_type != NONE_A) {
float one = 1;
float zero = 0;
CUDNN_CHECK(cudnnActivationForward(cudnn_handler(),
cl.activationDesc,
&one,
cl.outputTensorDesc,
cl.output_gpu,
&zero,
cl.outputTensorDesc,
                                            cl.output_gpu));
}
}
|
95a4ab2242667c1688f827a01ec15243071346c7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "string.h"
#include "stdio.h"
#include "stdlib.h"
#include "math.h"
#include "pthread.h"
#include "immintrin.h"
#define CUDA_THREADS_2D 16
#define CUDA_THREADS_1D 256
#define MAX(x, y) (x >= y ? x : y)
#define MIN(x, y) (x <= y ? x : y)
#define P_THREADS 4
// [Conv2D]
__global__ void h_cuda_im2col(float* im_b, float* col_b,
int oh, int ow,
int iw, int ic,
int kh, int kw,
int sh, int sw)
{
int col_w = ic * kh * kw;
int col_i_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (col_i_idx < oh * ow) {
int patch_i = (col_i_idx / ow) * sh;
int patch_j = (col_i_idx % ow) * sw;
for (int c = 0; c < ic; ++c) {
int col_j = c * (kh * kw);
for (int di = 0; di < kh; ++di) {
for (int dj = 0; dj < kw; ++dj) {
col_b[col_i_idx * col_w +
col_j + (di * kw) + dj] =
im_b[(patch_i + di) * (iw * ic) +
(patch_j + dj) * ic +
c];
}
}
}
}
}
__global__ void h_cuda_matmul(float* imcol, float* kernel, float* result,
int m_size, int n_size, int k_size)
{
int i_idx = blockIdx.y * blockDim.y + threadIdx.y;
int j_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (i_idx < m_size && j_idx < n_size) {
float res = 0.0f;
for (int k = 0; k < k_size; ++k) {
res += imcol[i_idx * k_size + k] * kernel[k * n_size + j_idx];
}
result[i_idx * n_size + j_idx] = res;
}
}
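// Shape convention for the im2col + GEMM lowering used throughout this file,
// with illustrative numbers: a 3x3 conv over ic=16 channels producing a 13x13
// output with od=32 filters becomes a (169 x 144) * (144 x 32) product, i.e.
// m = oh*ow, k = ic*kh*kw, n = od.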
void im2col(float* im_b,
float* col_b,
int oh, int ow,
int ih, int iw, int ic,
int kh, int kw,
int sh, int sw)
{
int col_w = ic * kh * kw;
for (int i = 0; i < oh; ++i) {
for (int j = 0; j < ow; ++j) {
int patch_i = i * sh;
int patch_j = j * sw;
for (int c = 0; c < ic; ++c) {
int col_i = i * ow + j;
int col_j = c * (kh * kw);
for (int di = 0; di < kh; ++di) {
for (int dj = 0; dj < kw; ++dj) {
col_b[col_i * col_w +
col_j + (di * kw) + dj] =
im_b[(patch_i + di) * (iw * ic) +
(patch_j + dj) * ic +
c];
}
}
}
}
}
}
struct shape_arg {
int oh, ow, od;
int ih, iw, ic;
int kh, kw;
int sh, sw;
};
struct im2col_thread_arg {
float* im_b;
float* col_b;
struct shape_arg* shape;
int oh_s;
int oh_e;
};
void* im2col_thread_func(void* thread_arg)
{
struct im2col_thread_arg* arg = (struct im2col_thread_arg*) thread_arg;
struct shape_arg* shape = arg->shape;
int col_w = shape->ic * shape->kh * shape->kw;
for (int i = arg->oh_s; i < arg->oh_e; ++i) {
for (int j = 0; j < shape->ow; ++j) {
int patch_i = i * shape->sh;
int patch_j = j * shape->sw;
for (int c = 0; c < shape->ic; ++c) {
int col_i = i * shape->ow + j;
int col_j = c * (shape->kh * shape->kw);
for (int di = 0; di < shape->kh; ++di) {
for (int dj = 0; dj < shape->kw; ++dj) {
arg->col_b[col_i * col_w +
col_j + (di * shape->kw) + dj] =
arg->im_b[(patch_i + di) * (shape->iw * shape->ic) +
(patch_j + dj) * shape->ic +
c];
}
}
}
}
}
return 0;
}
extern "C" {
void conv2d_cuda_pthread(float* in_layer,
float* col,
float* kernel_r,
float* result,
int batch,
int* shape_arg_arr)
{
struct shape_arg* shape = (struct shape_arg*) shape_arg_arr;
for (int b = 0; b < batch; ++b) {
float* im_b = in_layer + b * (shape->ih * shape->iw * shape->ic);
float* col_b = col + b * ((shape->oh * shape->ow) * (shape->ic * shape->kh * shape->kw));
float* result_b = result + b * (shape->oh * shape->ow * shape->od);
pthread_t threads[P_THREADS];
struct im2col_thread_arg t_args[P_THREADS];
int num_threads = MIN(P_THREADS, shape->oh);
int oh_part_size = shape->oh / num_threads;
t_args[0].im_b = im_b;
t_args[0].col_b = col_b;
t_args[0].shape = shape;
int t_id;
for (int t_idx = 0; t_idx < num_threads; ++t_idx) {
if (t_idx > 0) {
t_args[t_idx] = t_args[0];
}
int oh_s = oh_part_size * t_idx;
int oh_e = t_idx < num_threads - 1 ? oh_s + oh_part_size : shape->oh;
t_args[t_idx].oh_s = oh_s;
t_args[t_idx].oh_e = oh_e;
t_id = pthread_create(&threads[t_idx], NULL, im2col_thread_func, (void*) &t_args[t_idx]);
if (t_id < 0) {
perror("conv2d im2col thread error : ");
exit(0);
}
}
for (int t_idx = 0; t_idx < num_threads; ++t_idx) {
pthread_join(threads[t_idx], NULL);
}
// col_b : (oh * ow) X (ic * kh * kw)
// kernel_r : (ic * kh * kw) X od
int m_size = shape->oh * shape->ow;
int n_size = shape->od;
int k_size = shape->ic * shape->kh * shape->kw;
float* d_imcol;
float* d_kernel;
float* d_result;
hipMalloc((void **) &d_imcol, sizeof(float) * m_size * k_size);
hipMalloc((void **) &d_kernel, sizeof(float) * k_size * n_size);
        hipMalloc((void **) &d_result, sizeof(float) * m_size * n_size); // result is m x n (od columns), not m x k
hipMemcpy(d_imcol, col_b, sizeof(float) * m_size * k_size, hipMemcpyHostToDevice);
hipMemcpy(d_kernel, kernel_r, sizeof(float) * k_size * n_size, hipMemcpyHostToDevice);
// TODO: Optimize here for Yolov2tiny size
unsigned int grid_r = (m_size + CUDA_THREADS_2D - 1) / CUDA_THREADS_2D;
unsigned int grid_c = (n_size + CUDA_THREADS_2D - 1) / CUDA_THREADS_2D;
dim3 grid_dim(grid_c, grid_r);
dim3 block_dim(CUDA_THREADS_2D, CUDA_THREADS_2D);
hipLaunchKernelGGL(( h_cuda_matmul), dim3(grid_dim), dim3(block_dim), 0, 0, d_imcol, d_kernel, d_result, m_size, n_size, k_size);
hipFree(d_imcol);
hipFree(d_kernel);
hipMemcpy(result_b, d_result, sizeof(float) * m_size * n_size, hipMemcpyDeviceToHost);
hipFree(d_result);
}
}
void conv2d_cuda(float* in_layer,
float* col,
float* kernel_r,
float* result,
int batch, int oh, int ow, int od,
int ih, int iw, int ic,
int kh, int kw,
int sh, int sw)
{
for (int b = 0; b < batch; ++b) {
float* im_b = in_layer + b * (ih * iw * ic);
float* col_b = col + b * ((oh * ow) * (ic * kh * kw));
float* result_b = result + b * (oh * ow * od);
im2col(im_b,
col_b,
oh, ow,
ih, iw, ic,
kh, kw,
sh, sw);
// col_b : (oh * ow) X (ic * kh * kw)
// kernel_r : (ic * kh * kw) X od
int m_size = oh * ow;
int n_size = od;
int k_size = ic * kh * kw;
float* d_imcol;
float* d_kernel;
float* d_result;
hipMalloc((void **) &d_imcol, sizeof(float) * m_size * k_size);
hipMalloc((void **) &d_kernel, sizeof(float) * k_size * n_size);
        hipMalloc((void **) &d_result, sizeof(float) * m_size * n_size); // result is m x n (od columns), not m x k
hipMemcpy(d_imcol, col_b, sizeof(float) * m_size * k_size, hipMemcpyHostToDevice);
hipMemcpy(d_kernel, kernel_r, sizeof(float) * k_size * n_size, hipMemcpyHostToDevice);
// TODO: Optimize here for Yolov2tiny size
unsigned int grid_r = (m_size + CUDA_THREADS_2D - 1) / CUDA_THREADS_2D;
unsigned int grid_c = (n_size + CUDA_THREADS_2D - 1) / CUDA_THREADS_2D;
dim3 grid_dim(grid_c, grid_r);
dim3 block_dim(CUDA_THREADS_2D, CUDA_THREADS_2D);
hipLaunchKernelGGL(( h_cuda_matmul), dim3(grid_dim), dim3(block_dim), 0, 0, d_imcol, d_kernel, d_result, m_size, n_size, k_size);
hipFree(d_imcol);
hipFree(d_kernel);
hipMemcpy(result_b, d_result, sizeof(float) * m_size * n_size, hipMemcpyDeviceToHost);
hipFree(d_result);
}
}
void conv2d_cuda_im2col_cuda(float* in_layer,
float* col,
float* kernel_r,
float* result,
int batch, int oh, int ow, int od,
int ih, int iw, int ic,
int kh, int kw,
int sh, int sw)
{
for (int b = 0; b < batch; ++b) {
float* im_b = in_layer + b * (ih * iw * ic);
float* col_b = col + b * ((oh * ow) * (ic * kh * kw));
float* result_b = result + b * (oh * ow * od);
im2col(im_b,
col_b,
oh, ow,
ih, iw, ic,
kh, kw,
sh, sw);
// col_b : (oh * ow) X (ic * kh * kw)
// kernel_r : (ic * kh * kw) X od
int im_size = ih * iw * ic;
int m_size = oh * ow;
int n_size = od;
int k_size = ic * kh * kw;
float* d_im;
float* d_col;
float* d_kernel;
float* d_result;
hipMalloc((void **) &d_im, sizeof(float) * im_size);
hipMalloc((void **) &d_col, sizeof(float) * m_size * k_size);
hipMemcpy(d_im, im_b, sizeof(float) * im_size, hipMemcpyHostToDevice);
unsigned int grid_m = (m_size + CUDA_THREADS_1D - 1) / CUDA_THREADS_1D;
dim3 grid_m_dim(grid_m);
dim3 block_m_dim(CUDA_THREADS_1D);
hipLaunchKernelGGL(( h_cuda_im2col), dim3(grid_m_dim), dim3(block_m_dim), 0, 0, d_im, d_col,
oh, ow, iw, ic, kh, kw, sh, sw);
hipFree(d_im);
hipMalloc((void **) &d_kernel, sizeof(float) * k_size * n_size);
        hipMalloc((void **) &d_result, sizeof(float) * m_size * n_size); // result is m x n (od columns), not m x k
hipMemcpy(d_kernel, kernel_r, sizeof(float) * k_size * n_size, hipMemcpyHostToDevice);
// TODO: Optimize here for Yolov2tiny size
unsigned int grid_r = (m_size + CUDA_THREADS_2D - 1) / CUDA_THREADS_2D;
unsigned int grid_c = (n_size + CUDA_THREADS_2D - 1) / CUDA_THREADS_2D;
dim3 grid_dim(grid_c, grid_r);
dim3 block_dim(CUDA_THREADS_2D, CUDA_THREADS_2D);
hipLaunchKernelGGL(( h_cuda_matmul), dim3(grid_dim), dim3(block_dim), 0, 0, d_col, d_kernel, d_result, m_size, n_size, k_size);
hipFree(d_col);
hipFree(d_kernel);
hipMemcpy(result_b, d_result, sizeof(float) * m_size * n_size, hipMemcpyDeviceToHost);
hipFree(d_result);
}
}
} // extern C
// [BiasAdd]
__global__ void h_cuda_bias_add(
float* biases, float* result,
int r_size,
int od)
{
int t_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (t_idx < r_size) {
result[t_idx] += biases[t_idx % od];
}
}
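// The t_idx % od channel lookup assumes channel-last (NHWC) layout: od is the
// innermost dimension, so the bias index repeats with period od across the
// flattened result.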
extern "C" {
struct bias_add_thread_arg {
float* im_b;
float* biases;
float* result_b;
int ow;
int od;
int oh_s;
int oh_e;
};
void* bias_add_thread_func(void* thread_arg)
{
struct bias_add_thread_arg* arg = (struct bias_add_thread_arg*) thread_arg;
    __m256 biases_av[arg->od / 8]; // variable-length array: a GCC/Clang extension, not standard C++
for (int d = 0; d <= arg->od - 8; d += 8) {
biases_av[d / 8] = _mm256_loadu_ps(arg->biases + d);
}
for (int i = arg->oh_s; i < arg->oh_e; ++i) {
for (int j = 0; j < arg->ow; ++j) {
int d;
for (d = 0; d <= arg->od - 8; d += 8) {
int ri = i * (arg->ow * arg->od) +
j * arg->od +
d;
__m256 in_av = _mm256_loadu_ps(arg->im_b + ri);
__m256 r_av = _mm256_add_ps(in_av, biases_av[d / 8]);
_mm256_storeu_ps(arg->result_b + ri, r_av);
}
if (d < arg->od) {
for (; d < arg->od; ++d) {
int rri = i * (arg->ow * arg->od) +
j * arg->od +
d;
arg->result_b[rri] = arg->im_b[rri] + arg->biases[d];
}
}
}
}
return 0;
}
void bias_add_pthread(float* in_layer, float* biases, float* result,
int batch, int oh, int ow, int od)
{
for (int b = 0; b < batch; ++b) {
float* im_b = in_layer + b * (oh * ow * od);
float* result_b = result + b * (oh * ow * od);
pthread_t threads[P_THREADS];
struct bias_add_thread_arg t_args[P_THREADS];
int num_threads = MIN(P_THREADS, oh);
int oh_part_size = oh / num_threads;
t_args[0].im_b = im_b;
t_args[0].biases = biases;
t_args[0].result_b = result_b;
t_args[0].ow = ow;
t_args[0].od = od;
int t_id;
for (int t_idx = 0; t_idx < num_threads; ++t_idx) {
if (t_idx > 0) {
t_args[t_idx] = t_args[0];
}
int oh_s = oh_part_size * t_idx;
int oh_e = t_idx < num_threads - 1 ? oh_s + oh_part_size : oh;
t_args[t_idx].oh_s = oh_s;
t_args[t_idx].oh_e = oh_e;
t_id = pthread_create(&threads[t_idx], NULL, bias_add_thread_func, (void*) &t_args[t_idx]);
if (t_id < 0) {
perror("bias add thread error : ");
exit(0);
}
}
for (int t_idx = 0; t_idx < num_threads; ++t_idx) {
pthread_join(threads[t_idx], NULL);
}
}
}
void bias_add_cuda(float* in_layer, float* biases, float* result,
int batch, int oh, int ow, int od)
{
int r_size = batch * oh * ow * od;
memcpy(result, in_layer, sizeof(float) * r_size);
float* d_biases;
float* d_result;
hipMalloc((void **) &d_result, sizeof(float) * r_size);
hipMalloc((void **) &d_biases, sizeof(float) * od);
hipMemcpy(d_result, result, sizeof(float) * r_size, hipMemcpyHostToDevice);
hipMemcpy(d_biases, biases, sizeof(float) * od, hipMemcpyHostToDevice);
unsigned int grid_size = (r_size + CUDA_THREADS_1D - 1) / CUDA_THREADS_1D;
dim3 grid_dim(grid_size);
dim3 block_dim(CUDA_THREADS_1D);
hipLaunchKernelGGL(( h_cuda_bias_add), dim3(grid_dim), dim3(block_dim), 0, 0, d_biases, d_result, r_size, od);
hipFree(d_biases);
hipMemcpy(result, d_result, sizeof(float) * r_size, hipMemcpyDeviceToHost);
hipFree(d_result);
}
void bias_add(float* in_layer, float* biases, float* result,
int batch, int oh, int ow, int od)
{
for (int b = 0; b < batch; ++b) {
for (int i = 0; i < oh; ++i) {
for (int j = 0; j < ow; ++j) {
for (int d = 0; d < od; ++d) {
int ri = b * (oh * ow * od) +
i * (ow * od) +
j * od +
d;
result[ri] = in_layer[ri] + biases[d];
}
}
}
}
}
} // extern C
// [Maxpool2D]
__global__ void h_cuda_max_pool2d(
float* in_layer, float* result,
int r_size,
int oh, int ow, int od,
int ih, int iw, int ic,
int kh, int kw,
int sh, int sw)
{
int t_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (t_idx < r_size) {
// Calc i, j, d.
int d = t_idx;
int i = d / (ow * od);
d -= i * (ow * od);
int j = d / od;
d -= j * od;
int ii = (i * sh) * (iw * ic) + (j * sw) * ic + d;
float imax = in_layer[ii];
for (int di = 0; di < kh; ++di) {
for (int dj = 0; dj < kw; ++dj) {
if (di > 0 || dj > 0) {
imax = MAX(imax,
in_layer[ii + di * (iw * ic) + dj * ic]);
}
}
}
result[t_idx] = imax;
}
}
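// The t_idx -> (i, j, d) decomposition above inverts the row-major flattening
// t_idx = i*(ow*od) + j*od + d (channel-last layout); ii then maps (i, j)
// back to the input window origin through the strides (sh, sw).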
extern "C" {
void max_pool2d(float* in_layer,
float* result,
int batch, int oh, int ow, int od,
int ih, int iw, int ic,
int kh, int kw,
int sh, int sw)
{
for (int b = 0; b < batch; ++b) {
for (int i = 0; i < oh; ++i) {
for (int j = 0; j < ow; ++j) {
for (int d = 0; d < od; ++d) {
                    int ii = b * (ih * iw * ic) + (i * sh) * (iw * ic) + (j * sw) * ic + d; // include the per-batch input offset
float imax = in_layer[ii];
for (int di = 0; di < kh; ++di) {
for (int dj = 0; dj < kw; ++dj) {
if (di > 0 || dj > 0) {
imax = MAX(imax,
in_layer[ii + di * (iw * ic) + dj * ic]);
}
}
}
result[
b * (oh * ow * od) +
i * (ow * od) +
j * od +
d
] = imax;
}
}
}
}
}
void max_pool2d_test(float* in_layer,
float* result,
int batch, int oh, int ow, int od,
int ih, int iw, int ic,
int kh, int kw,
int sh, int sw)
{
for (int b = 0; b < batch; ++b) {
int r_size = oh * ow * od;
float* in_layer_b = in_layer + b * (ih * iw * ic);
float* result_b = result + b * (oh * ow * od);
for (int t_idx = 0; t_idx < r_size; ++t_idx) {
int d = t_idx;
int i = d / (ow * od);
d -= i * (ow * od);
int j = d / od;
d -= j * od;
int ii = (i * sh) * (iw * ic) + (j * sw) * ic + d;
float imax = in_layer_b[ii];
for (int di = 0; di < kh; ++di) {
for (int dj = 0; dj < kw; ++dj) {
if (di > 0 || dj > 0) {
imax = MAX(imax,
in_layer_b[ii + di * (iw * ic) + dj * ic]);
}
}
}
result_b[t_idx] = imax;
}
}
}
void max_pool2d_cuda(float* in_layer,
float* result,
int batch, int oh, int ow, int od,
int ih, int iw, int ic,
int kh, int kw,
int sh, int sw)
{
for (int b = 0; b < batch; ++b) {
int r_size = oh * ow * od;
int i_size = ih * iw * ic;
float* in_layer_b = in_layer + b * (ih * iw * ic);
float* result_b = result + b * (oh * ow * od);
float* d_in_layer;
float* d_result;
hipMalloc((void **) &d_in_layer, sizeof(float) * i_size);
hipMalloc((void **) &d_result, sizeof(float) * r_size);
hipMemcpy(d_in_layer, in_layer_b, sizeof(float) * i_size, hipMemcpyHostToDevice);
unsigned int grid_size = (r_size + CUDA_THREADS_1D - 1) / CUDA_THREADS_1D;
dim3 grid_dim(grid_size);
dim3 block_dim(CUDA_THREADS_1D);
hipLaunchKernelGGL(( h_cuda_max_pool2d), dim3(grid_dim), dim3(block_dim), 0, 0,
d_in_layer, d_result,
r_size,
oh, ow, od,
ih, iw, ic,
kh, kw,
sh, sw);
hipFree(d_in_layer);
hipMemcpy(result_b, d_result, sizeof(float) * r_size, hipMemcpyDeviceToHost);
hipFree(d_result);
}
}
void max_pool2d_avx(float* in_layer,
float* result,
int batch, int oh, int ow, int od,
int ih, int iw, int ic,
int kh, int kw,
int sh, int sw)
{
for (int b = 0; b < batch; ++b) {
for (int i = 0; i < oh; ++i) {
for (int j = 0; j < ow; ++j) {
int in_i = i * sh;
int in_j = j * sw;
int i_idx = b * (ih * iw * ic) +
in_i * (iw * ic) +
in_j * ic;
int r_idx = b * (oh * ow * od) +
i * (ow * od) +
j * od;
int d;
for (d = 0; d <= od - 8; d += 8) {
__m256 imax_av = _mm256_loadu_ps(in_layer + i_idx + d);
for (int di = 0; di < kh; ++di) {
for (int dj = 0; dj < kw; ++dj) {
__m256 icand_av = _mm256_loadu_ps(
in_layer + i_idx +
di * (iw * ic) +
dj * ic +
d);
imax_av = _mm256_max_ps(imax_av, icand_av);
}
}
_mm256_storeu_ps(result + r_idx + d, imax_av);
}
if (d < od) {
for (; d < od; ++d) {
float imax = in_layer[i_idx + d];
                    for (int di = 0; di < kh; ++di) {
                        for (int dj = 0; dj < kw; ++dj) {
                            imax = MAX(imax,
                                in_layer[i_idx +
                                    di * (iw * ic) +
                                    dj * ic +
                                    d]);
                        }
                    }
                    result[r_idx + d] = imax; // store once per channel, after scanning the full kh x kw window
                }
}
}
}
}
}
} // extern C
// [BatchNorm]
__global__ void h_cuda_batch_norm(float* alpha, float* beta, float* result,
int r_size, int od)
{
int t_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (t_idx < r_size) {
int d = t_idx % od;
result[t_idx] = result[t_idx] * alpha[d] - beta[d];
}
}
__global__ void h_cuda_batch_norm2(float* in_layer, float* alpha, float* beta, float* result,
int r_size, int od)
{
int t_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (t_idx < r_size) {
int d = t_idx % od;
result[t_idx] = in_layer[t_idx] * alpha[d] - beta[d];
}
}
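// Presumably alpha/beta here are folded inference-time batch-norm parameters:
// with alpha[d] = gamma[d]/sqrt(var[d]+eps) and beta[d] = alpha[d]*mean[d] - bias[d],
// x*alpha - beta reproduces gamma*(x - mean)/sqrt(var+eps) + bias.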
extern "C" {
void batch_norm_cuda(float* in_layer,
float* alpha,
float* beta,
float* result,
int batch, int oh, int ow, int od)
{
int r_size = batch * oh * ow * od;
memcpy(result, in_layer, sizeof(float) * r_size);
float* d_alpha;
float* d_beta;
float* d_result;
hipMalloc((void **) &d_alpha, sizeof(float) * od);
hipMalloc((void **) &d_beta, sizeof(float) * od);
hipMalloc((void **) &d_result, sizeof(float) * r_size);
hipMemcpy(d_result, result, sizeof(float) * r_size, hipMemcpyHostToDevice);
hipMemcpy(d_alpha, alpha, sizeof(float) * od, hipMemcpyHostToDevice);
hipMemcpy(d_beta, beta, sizeof(float) * od, hipMemcpyHostToDevice);
unsigned int grid_size = (r_size + CUDA_THREADS_1D - 1) / CUDA_THREADS_1D;
dim3 grid_dim(grid_size);
dim3 block_dim(CUDA_THREADS_1D);
hipLaunchKernelGGL(( h_cuda_batch_norm), dim3(grid_dim), dim3(block_dim), 0, 0, d_alpha, d_beta, d_result, r_size, od);
hipFree(d_alpha);
hipFree(d_beta);
hipMemcpy(result, d_result, sizeof(float) * r_size, hipMemcpyDeviceToHost);
hipFree(d_result);
}
void batch_norm_cuda2(float* in_layer,
float* alpha,
float* beta,
float* result,
int batch, int oh, int ow, int od)
{
int r_size = batch * oh * ow * od;
float* d_in_layer;
float* d_alpha;
float* d_beta;
float* d_result;
hipMalloc((void **) &d_in_layer, sizeof(float) * r_size);
hipMalloc((void **) &d_alpha, sizeof(float) * od);
hipMalloc((void **) &d_beta, sizeof(float) * od);
hipMalloc((void **) &d_result, sizeof(float) * r_size);
hipMemcpy(d_in_layer, in_layer, sizeof(float) * r_size, hipMemcpyHostToDevice);
hipMemcpy(d_alpha, alpha, sizeof(float) * od, hipMemcpyHostToDevice);
hipMemcpy(d_beta, beta, sizeof(float) * od, hipMemcpyHostToDevice);
unsigned int grid_size = (r_size + CUDA_THREADS_1D - 1) / CUDA_THREADS_1D;
dim3 grid_dim(grid_size);
dim3 block_dim(CUDA_THREADS_1D);
hipLaunchKernelGGL(( h_cuda_batch_norm2), dim3(grid_dim), dim3(block_dim), 0, 0, d_in_layer, d_alpha, d_beta, d_result, r_size, od);
hipFree(d_in_layer);
hipFree(d_alpha);
hipFree(d_beta);
hipMemcpy(result, d_result, sizeof(float) * r_size, hipMemcpyDeviceToHost);
hipFree(d_result);
}
} // extern C
// [LeakRelu]
extern "C" {
void leaky_relu(float* in_layer,
float* result,
int batch, int oh, int ow, int od)
{
for (int b = 0; b < batch; ++b) {
for (int i = 0; i < oh; ++i) {
for (int j = 0; j < ow; ++j) {
for (int d = 0; d < od; ++d) {
int idx = b * (oh * ow * od) +
i * (ow * od) +
j * od +
d;
float t = in_layer[idx];
result[idx] = t < 0 ? 0.1 * t : t;
}
}
}
}
}
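// The fixed 0.1 negative slope matches the leaky ReLU used by Darknet/YOLO
// models, consistent with the Yolov2tiny target mentioned in the TODOs above.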
} // extern C
|
95a4ab2242667c1688f827a01ec15243071346c7.cu
|
#include "string.h"
#include "stdio.h"
#include "stdlib.h"
#include "math.h"
#include "pthread.h"
#include "immintrin.h"
#define CUDA_THREADS_2D 16
#define CUDA_THREADS_1D 256
#define MAX(x, y) (x >= y ? x : y)
#define MIN(x, y) (x <= y ? x : y)
#define P_THREADS 4
// [Conv2D]
__global__ void h_cuda_im2col(float* im_b, float* col_b,
int oh, int ow,
int iw, int ic,
int kh, int kw,
int sh, int sw)
{
int col_w = ic * kh * kw;
int col_i_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (col_i_idx < oh * ow) {
int patch_i = (col_i_idx / ow) * sh;
int patch_j = (col_i_idx % ow) * sw;
for (int c = 0; c < ic; ++c) {
int col_j = c * (kh * kw);
for (int di = 0; di < kh; ++di) {
for (int dj = 0; dj < kw; ++dj) {
col_b[col_i_idx * col_w +
col_j + (di * kw) + dj] =
im_b[(patch_i + di) * (iw * ic) +
(patch_j + dj) * ic +
c];
}
}
}
}
}
__global__ void h_cuda_matmul(float* imcol, float* kernel, float* result,
int m_size, int n_size, int k_size)
{
int i_idx = blockIdx.y * blockDim.y + threadIdx.y;
int j_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (i_idx < m_size && j_idx < n_size) {
float res = 0.0f;
for (int k = 0; k < k_size; ++k) {
res += imcol[i_idx * k_size + k] * kernel[k * n_size + j_idx];
}
result[i_idx * n_size + j_idx] = res;
}
}
void im2col(float* im_b,
float* col_b,
int oh, int ow,
int ih, int iw, int ic,
int kh, int kw,
int sh, int sw)
{
int col_w = ic * kh * kw;
for (int i = 0; i < oh; ++i) {
for (int j = 0; j < ow; ++j) {
int patch_i = i * sh;
int patch_j = j * sw;
for (int c = 0; c < ic; ++c) {
int col_i = i * ow + j;
int col_j = c * (kh * kw);
for (int di = 0; di < kh; ++di) {
for (int dj = 0; dj < kw; ++dj) {
col_b[col_i * col_w +
col_j + (di * kw) + dj] =
im_b[(patch_i + di) * (iw * ic) +
(patch_j + dj) * ic +
c];
}
}
}
}
}
}
struct shape_arg {
int oh, ow, od;
int ih, iw, ic;
int kh, kw;
int sh, sw;
};
struct im2col_thread_arg {
float* im_b;
float* col_b;
struct shape_arg* shape;
int oh_s;
int oh_e;
};
void* im2col_thread_func(void* thread_arg)
{
struct im2col_thread_arg* arg = (struct im2col_thread_arg*) thread_arg;
struct shape_arg* shape = arg->shape;
int col_w = shape->ic * shape->kh * shape->kw;
for (int i = arg->oh_s; i < arg->oh_e; ++i) {
for (int j = 0; j < shape->ow; ++j) {
int patch_i = i * shape->sh;
int patch_j = j * shape->sw;
for (int c = 0; c < shape->ic; ++c) {
int col_i = i * shape->ow + j;
int col_j = c * (shape->kh * shape->kw);
for (int di = 0; di < shape->kh; ++di) {
for (int dj = 0; dj < shape->kw; ++dj) {
arg->col_b[col_i * col_w +
col_j + (di * shape->kw) + dj] =
arg->im_b[(patch_i + di) * (shape->iw * shape->ic) +
(patch_j + dj) * shape->ic +
c];
}
}
}
}
}
return 0;
}
extern "C" {
void conv2d_cuda_pthread(float* in_layer,
float* col,
float* kernel_r,
float* result,
int batch,
int* shape_arg_arr)
{
struct shape_arg* shape = (struct shape_arg*) shape_arg_arr;
for (int b = 0; b < batch; ++b) {
float* im_b = in_layer + b * (shape->ih * shape->iw * shape->ic);
float* col_b = col + b * ((shape->oh * shape->ow) * (shape->ic * shape->kh * shape->kw));
float* result_b = result + b * (shape->oh * shape->ow * shape->od);
pthread_t threads[P_THREADS];
struct im2col_thread_arg t_args[P_THREADS];
int num_threads = MIN(P_THREADS, shape->oh);
int oh_part_size = shape->oh / num_threads;
t_args[0].im_b = im_b;
t_args[0].col_b = col_b;
t_args[0].shape = shape;
int t_id;
for (int t_idx = 0; t_idx < num_threads; ++t_idx) {
if (t_idx > 0) {
t_args[t_idx] = t_args[0];
}
int oh_s = oh_part_size * t_idx;
int oh_e = t_idx < num_threads - 1 ? oh_s + oh_part_size : shape->oh;
t_args[t_idx].oh_s = oh_s;
t_args[t_idx].oh_e = oh_e;
t_id = pthread_create(&threads[t_idx], NULL, im2col_thread_func, (void*) &t_args[t_idx]);
if (t_id < 0) {
perror("conv2d im2col thread error : ");
exit(0);
}
}
for (int t_idx = 0; t_idx < num_threads; ++t_idx) {
pthread_join(threads[t_idx], NULL);
}
// col_b : (oh * ow) X (ic * kh * kw)
// kernel_r : (ic * kh * kw) X od
int m_size = shape->oh * shape->ow;
int n_size = shape->od;
int k_size = shape->ic * shape->kh * shape->kw;
float* d_imcol;
float* d_kernel;
float* d_result;
cudaMalloc((void **) &d_imcol, sizeof(float) * m_size * k_size);
cudaMalloc((void **) &d_kernel, sizeof(float) * k_size * n_size);
        cudaMalloc((void **) &d_result, sizeof(float) * m_size * n_size); // result is m x n (od columns), not m x k
cudaMemcpy(d_imcol, col_b, sizeof(float) * m_size * k_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_kernel, kernel_r, sizeof(float) * k_size * n_size, cudaMemcpyHostToDevice);
// TODO: Optimize here for Yolov2tiny size
unsigned int grid_r = (m_size + CUDA_THREADS_2D - 1) / CUDA_THREADS_2D;
unsigned int grid_c = (n_size + CUDA_THREADS_2D - 1) / CUDA_THREADS_2D;
dim3 grid_dim(grid_c, grid_r);
dim3 block_dim(CUDA_THREADS_2D, CUDA_THREADS_2D);
h_cuda_matmul<<<grid_dim, block_dim>>>(d_imcol, d_kernel, d_result, m_size, n_size, k_size);
cudaFree(d_imcol);
cudaFree(d_kernel);
cudaMemcpy(result_b, d_result, sizeof(float) * m_size * n_size, cudaMemcpyDeviceToHost);
cudaFree(d_result);
}
}
void conv2d_cuda(float* in_layer,
float* col,
float* kernel_r,
float* result,
int batch, int oh, int ow, int od,
int ih, int iw, int ic,
int kh, int kw,
int sh, int sw)
{
for (int b = 0; b < batch; ++b) {
float* im_b = in_layer + b * (ih * iw * ic);
float* col_b = col + b * ((oh * ow) * (ic * kh * kw));
float* result_b = result + b * (oh * ow * od);
im2col(im_b,
col_b,
oh, ow,
ih, iw, ic,
kh, kw,
sh, sw);
// col_b : (oh * ow) X (ic * kh * kw)
// kernel_r : (ic * kh * kw) X od
int m_size = oh * ow;
int n_size = od;
int k_size = ic * kh * kw;
float* d_imcol;
float* d_kernel;
float* d_result;
cudaMalloc((void **) &d_imcol, sizeof(float) * m_size * k_size);
cudaMalloc((void **) &d_kernel, sizeof(float) * k_size * n_size);
        cudaMalloc((void **) &d_result, sizeof(float) * m_size * n_size); // result is m x n (od columns), not m x k
cudaMemcpy(d_imcol, col_b, sizeof(float) * m_size * k_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_kernel, kernel_r, sizeof(float) * k_size * n_size, cudaMemcpyHostToDevice);
// TODO: Optimize here for Yolov2tiny size
unsigned int grid_r = (m_size + CUDA_THREADS_2D - 1) / CUDA_THREADS_2D;
unsigned int grid_c = (n_size + CUDA_THREADS_2D - 1) / CUDA_THREADS_2D;
dim3 grid_dim(grid_c, grid_r);
dim3 block_dim(CUDA_THREADS_2D, CUDA_THREADS_2D);
h_cuda_matmul<<<grid_dim, block_dim>>>(d_imcol, d_kernel, d_result, m_size, n_size, k_size);
cudaFree(d_imcol);
cudaFree(d_kernel);
cudaMemcpy(result_b, d_result, sizeof(float) * m_size * n_size, cudaMemcpyDeviceToHost);
cudaFree(d_result);
}
}
void conv2d_cuda_im2col_cuda(float* in_layer,
float* col,
float* kernel_r,
float* result,
int batch, int oh, int ow, int od,
int ih, int iw, int ic,
int kh, int kw,
int sh, int sw)
{
for (int b = 0; b < batch; ++b) {
float* im_b = in_layer + b * (ih * iw * ic);
float* col_b = col + b * ((oh * ow) * (ic * kh * kw));
float* result_b = result + b * (oh * ow * od);
im2col(im_b,
col_b,
oh, ow,
ih, iw, ic,
kh, kw,
sh, sw);
// col_b : (oh * ow) X (ic * kh * kw)
// kernel_r : (ic * kh * kw) X od
int im_size = ih * iw * ic;
int m_size = oh * ow;
int n_size = od;
int k_size = ic * kh * kw;
float* d_im;
float* d_col;
float* d_kernel;
float* d_result;
cudaMalloc((void **) &d_im, sizeof(float) * im_size);
cudaMalloc((void **) &d_col, sizeof(float) * m_size * k_size);
cudaMemcpy(d_im, im_b, sizeof(float) * im_size, cudaMemcpyHostToDevice);
unsigned int grid_m = (m_size + CUDA_THREADS_1D - 1) / CUDA_THREADS_1D;
dim3 grid_m_dim(grid_m);
dim3 block_m_dim(CUDA_THREADS_1D);
h_cuda_im2col<<<grid_m_dim, block_m_dim>>>(d_im, d_col,
oh, ow, iw, ic, kh, kw, sh, sw);
cudaFree(d_im);
cudaMalloc((void **) &d_kernel, sizeof(float) * k_size * n_size);
        cudaMalloc((void **) &d_result, sizeof(float) * m_size * n_size); // result is m x n (od columns), not m x k
cudaMemcpy(d_kernel, kernel_r, sizeof(float) * k_size * n_size, cudaMemcpyHostToDevice);
// TODO: Optimize here for Yolov2tiny size
unsigned int grid_r = (m_size + CUDA_THREADS_2D - 1) / CUDA_THREADS_2D;
unsigned int grid_c = (n_size + CUDA_THREADS_2D - 1) / CUDA_THREADS_2D;
dim3 grid_dim(grid_c, grid_r);
dim3 block_dim(CUDA_THREADS_2D, CUDA_THREADS_2D);
h_cuda_matmul<<<grid_dim, block_dim>>>(d_col, d_kernel, d_result, m_size, n_size, k_size);
cudaFree(d_col);
cudaFree(d_kernel);
cudaMemcpy(result_b, d_result, sizeof(float) * m_size * n_size, cudaMemcpyDeviceToHost);
cudaFree(d_result);
}
}
} // extern C
// [BiasAdd]
__global__ void h_cuda_bias_add(
float* biases, float* result,
int r_size,
int od)
{
int t_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (t_idx < r_size) {
result[t_idx] += biases[t_idx % od];
}
}
extern "C" {
struct bias_add_thread_arg {
float* im_b;
float* biases;
float* result_b;
int ow;
int od;
int oh_s;
int oh_e;
};
void* bias_add_thread_func(void* thread_arg)
{
struct bias_add_thread_arg* arg = (struct bias_add_thread_arg*) thread_arg;
    __m256 biases_av[arg->od / 8]; // variable-length array: a GCC/Clang extension, not standard C++
for (int d = 0; d <= arg->od - 8; d += 8) {
biases_av[d / 8] = _mm256_loadu_ps(arg->biases + d);
}
for (int i = arg->oh_s; i < arg->oh_e; ++i) {
for (int j = 0; j < arg->ow; ++j) {
int d;
for (d = 0; d <= arg->od - 8; d += 8) {
int ri = i * (arg->ow * arg->od) +
j * arg->od +
d;
__m256 in_av = _mm256_loadu_ps(arg->im_b + ri);
__m256 r_av = _mm256_add_ps(in_av, biases_av[d / 8]);
_mm256_storeu_ps(arg->result_b + ri, r_av);
}
if (d < arg->od) {
for (; d < arg->od; ++d) {
int rri = i * (arg->ow * arg->od) +
j * arg->od +
d;
arg->result_b[rri] = arg->im_b[rri] + arg->biases[d];
}
}
}
}
return 0;
}
void bias_add_pthread(float* in_layer, float* biases, float* result,
int batch, int oh, int ow, int od)
{
for (int b = 0; b < batch; ++b) {
float* im_b = in_layer + b * (oh * ow * od);
float* result_b = result + b * (oh * ow * od);
pthread_t threads[P_THREADS];
struct bias_add_thread_arg t_args[P_THREADS];
int num_threads = MIN(P_THREADS, oh);
int oh_part_size = oh / num_threads;
t_args[0].im_b = im_b;
t_args[0].biases = biases;
t_args[0].result_b = result_b;
t_args[0].ow = ow;
t_args[0].od = od;
int t_id;
for (int t_idx = 0; t_idx < num_threads; ++t_idx) {
if (t_idx > 0) {
t_args[t_idx] = t_args[0];
}
int oh_s = oh_part_size * t_idx;
int oh_e = t_idx < num_threads - 1 ? oh_s + oh_part_size : oh;
t_args[t_idx].oh_s = oh_s;
t_args[t_idx].oh_e = oh_e;
t_id = pthread_create(&threads[t_idx], NULL, bias_add_thread_func, (void*) &t_args[t_idx]);
if (t_id < 0) {
perror("bias add thread error : ");
exit(0);
}
}
for (int t_idx = 0; t_idx < num_threads; ++t_idx) {
pthread_join(threads[t_idx], NULL);
}
}
}
void bias_add_cuda(float* in_layer, float* biases, float* result,
int batch, int oh, int ow, int od)
{
int r_size = batch * oh * ow * od;
memcpy(result, in_layer, sizeof(float) * r_size);
float* d_biases;
float* d_result;
cudaMalloc((void **) &d_result, sizeof(float) * r_size);
cudaMalloc((void **) &d_biases, sizeof(float) * od);
cudaMemcpy(d_result, result, sizeof(float) * r_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_biases, biases, sizeof(float) * od, cudaMemcpyHostToDevice);
unsigned int grid_size = (r_size + CUDA_THREADS_1D - 1) / CUDA_THREADS_1D;
dim3 grid_dim(grid_size);
dim3 block_dim(CUDA_THREADS_1D);
h_cuda_bias_add<<<grid_dim, block_dim>>>(d_biases, d_result, r_size, od);
cudaFree(d_biases);
cudaMemcpy(result, d_result, sizeof(float) * r_size, cudaMemcpyDeviceToHost);
cudaFree(d_result);
}
void bias_add(float* in_layer, float* biases, float* result,
int batch, int oh, int ow, int od)
{
for (int b = 0; b < batch; ++b) {
for (int i = 0; i < oh; ++i) {
for (int j = 0; j < ow; ++j) {
for (int d = 0; d < od; ++d) {
int ri = b * (oh * ow * od) +
i * (ow * od) +
j * od +
d;
result[ri] = in_layer[ri] + biases[d];
}
}
}
}
}
} // extern C
// [Maxpool2D]
__global__ void h_cuda_max_pool2d(
float* in_layer, float* result,
int r_size,
int oh, int ow, int od,
int ih, int iw, int ic,
int kh, int kw,
int sh, int sw)
{
int t_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (t_idx < r_size) {
// Calc i, j, d.
int d = t_idx;
int i = d / (ow * od);
d -= i * (ow * od);
int j = d / od;
d -= j * od;
int ii = (i * sh) * (iw * ic) + (j * sw) * ic + d;
float imax = in_layer[ii];
for (int di = 0; di < kh; ++di) {
for (int dj = 0; dj < kw; ++dj) {
if (di > 0 || dj > 0) {
imax = MAX(imax,
in_layer[ii + di * (iw * ic) + dj * ic]);
}
}
}
result[t_idx] = imax;
}
}
extern "C" {
void max_pool2d(float* in_layer,
float* result,
int batch, int oh, int ow, int od,
int ih, int iw, int ic,
int kh, int kw,
int sh, int sw)
{
for (int b = 0; b < batch; ++b) {
for (int i = 0; i < oh; ++i) {
for (int j = 0; j < ow; ++j) {
for (int d = 0; d < od; ++d) {
                    int ii = b * (ih * iw * ic) + (i * sh) * (iw * ic) + (j * sw) * ic + d; // include the per-batch input offset
float imax = in_layer[ii];
for (int di = 0; di < kh; ++di) {
for (int dj = 0; dj < kw; ++dj) {
if (di > 0 || dj > 0) {
imax = MAX(imax,
in_layer[ii + di * (iw * ic) + dj * ic]);
}
}
}
result[
b * (oh * ow * od) +
i * (ow * od) +
j * od +
d
] = imax;
}
}
}
}
}
void max_pool2d_test(float* in_layer,
float* result,
int batch, int oh, int ow, int od,
int ih, int iw, int ic,
int kh, int kw,
int sh, int sw)
{
for (int b = 0; b < batch; ++b) {
int r_size = oh * ow * od;
float* in_layer_b = in_layer + b * (ih * iw * ic);
float* result_b = result + b * (oh * ow * od);
for (int t_idx = 0; t_idx < r_size; ++t_idx) {
int d = t_idx;
int i = d / (ow * od);
d -= i * (ow * od);
int j = d / od;
d -= j * od;
int ii = (i * sh) * (iw * ic) + (j * sw) * ic + d;
float imax = in_layer_b[ii];
for (int di = 0; di < kh; ++di) {
for (int dj = 0; dj < kw; ++dj) {
if (di > 0 || dj > 0) {
imax = MAX(imax,
in_layer_b[ii + di * (iw * ic) + dj * ic]);
}
}
}
result_b[t_idx] = imax;
}
}
}
void max_pool2d_cuda(float* in_layer,
float* result,
int batch, int oh, int ow, int od,
int ih, int iw, int ic,
int kh, int kw,
int sh, int sw)
{
for (int b = 0; b < batch; ++b) {
int r_size = oh * ow * od;
int i_size = ih * iw * ic;
float* in_layer_b = in_layer + b * (ih * iw * ic);
float* result_b = result + b * (oh * ow * od);
float* d_in_layer;
float* d_result;
cudaMalloc((void **) &d_in_layer, sizeof(float) * i_size);
cudaMalloc((void **) &d_result, sizeof(float) * r_size);
cudaMemcpy(d_in_layer, in_layer_b, sizeof(float) * i_size, cudaMemcpyHostToDevice);
unsigned int grid_size = (r_size + CUDA_THREADS_1D - 1) / CUDA_THREADS_1D;
dim3 grid_dim(grid_size);
dim3 block_dim(CUDA_THREADS_1D);
h_cuda_max_pool2d<<<grid_dim, block_dim>>>(
d_in_layer, d_result,
r_size,
oh, ow, od,
ih, iw, ic,
kh, kw,
sh, sw);
cudaFree(d_in_layer);
cudaMemcpy(result_b, d_result, sizeof(float) * r_size, cudaMemcpyDeviceToHost);
cudaFree(d_result);
}
}
void max_pool2d_avx(float* in_layer,
float* result,
int batch, int oh, int ow, int od,
int ih, int iw, int ic,
int kh, int kw,
int sh, int sw)
{
for (int b = 0; b < batch; ++b) {
for (int i = 0; i < oh; ++i) {
for (int j = 0; j < ow; ++j) {
int in_i = i * sh;
int in_j = j * sw;
int i_idx = b * (ih * iw * ic) +
in_i * (iw * ic) +
in_j * ic;
int r_idx = b * (oh * ow * od) +
i * (ow * od) +
j * od;
int d;
for (d = 0; d <= od - 8; d += 8) {
__m256 imax_av = _mm256_loadu_ps(in_layer + i_idx + d);
for (int di = 0; di < kh; ++di) {
for (int dj = 0; dj < kw; ++dj) {
__m256 icand_av = _mm256_loadu_ps(
in_layer + i_idx +
di * (iw * ic) +
dj * ic +
d);
imax_av = _mm256_max_ps(imax_av, icand_av);
}
}
_mm256_storeu_ps(result + r_idx + d, imax_av);
}
if (d < od) {
for (; d < od; ++d) {
float imax = in_layer[i_idx + d];
                    for (int di = 0; di < kh; ++di) {
                        for (int dj = 0; dj < kw; ++dj) {
                            imax = MAX(imax,
                                in_layer[i_idx +
                                    di * (iw * ic) +
                                    dj * ic +
                                    d]);
                        }
                    }
                    result[r_idx + d] = imax; // store once per channel, after scanning the full kh x kw window
                }
}
}
}
}
}
} // extern C
// [BatchNorm]
__global__ void h_cuda_batch_norm(float* alpha, float* beta, float* result,
int r_size, int od)
{
int t_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (t_idx < r_size) {
int d = t_idx % od;
result[t_idx] = result[t_idx] * alpha[d] - beta[d];
}
}
__global__ void h_cuda_batch_norm2(float* in_layer, float* alpha, float* beta, float* result,
int r_size, int od)
{
int t_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (t_idx < r_size) {
int d = t_idx % od;
result[t_idx] = in_layer[t_idx] * alpha[d] - beta[d];
}
}
extern "C" {
void batch_norm_cuda(float* in_layer,
float* alpha,
float* beta,
float* result,
int batch, int oh, int ow, int od)
{
int r_size = batch * oh * ow * od;
memcpy(result, in_layer, sizeof(float) * r_size);
float* d_alpha;
float* d_beta;
float* d_result;
cudaMalloc((void **) &d_alpha, sizeof(float) * od);
cudaMalloc((void **) &d_beta, sizeof(float) * od);
cudaMalloc((void **) &d_result, sizeof(float) * r_size);
cudaMemcpy(d_result, result, sizeof(float) * r_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_alpha, alpha, sizeof(float) * od, cudaMemcpyHostToDevice);
cudaMemcpy(d_beta, beta, sizeof(float) * od, cudaMemcpyHostToDevice);
unsigned int grid_size = (r_size + CUDA_THREADS_1D - 1) / CUDA_THREADS_1D;
dim3 grid_dim(grid_size);
dim3 block_dim(CUDA_THREADS_1D);
h_cuda_batch_norm<<<grid_dim, block_dim>>>(d_alpha, d_beta, d_result, r_size, od);
cudaFree(d_alpha);
cudaFree(d_beta);
cudaMemcpy(result, d_result, sizeof(float) * r_size, cudaMemcpyDeviceToHost);
cudaFree(d_result);
}
void batch_norm_cuda2(float* in_layer,
float* alpha,
float* beta,
float* result,
int batch, int oh, int ow, int od)
{
int r_size = batch * oh * ow * od;
float* d_in_layer;
float* d_alpha;
float* d_beta;
float* d_result;
cudaMalloc((void **) &d_in_layer, sizeof(float) * r_size);
cudaMalloc((void **) &d_alpha, sizeof(float) * od);
cudaMalloc((void **) &d_beta, sizeof(float) * od);
cudaMalloc((void **) &d_result, sizeof(float) * r_size);
cudaMemcpy(d_in_layer, in_layer, sizeof(float) * r_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_alpha, alpha, sizeof(float) * od, cudaMemcpyHostToDevice);
cudaMemcpy(d_beta, beta, sizeof(float) * od, cudaMemcpyHostToDevice);
unsigned int grid_size = (r_size + CUDA_THREADS_1D - 1) / CUDA_THREADS_1D;
dim3 grid_dim(grid_size);
dim3 block_dim(CUDA_THREADS_1D);
h_cuda_batch_norm2<<<grid_dim, block_dim>>>(d_in_layer, d_alpha, d_beta, d_result, r_size, od);
cudaFree(d_in_layer);
cudaFree(d_alpha);
cudaFree(d_beta);
cudaMemcpy(result, d_result, sizeof(float) * r_size, cudaMemcpyDeviceToHost);
cudaFree(d_result);
}
} // extern C
// [LeakRelu]
extern "C" {
void leaky_relu(float* in_layer,
float* result,
int batch, int oh, int ow, int od)
{
for (int b = 0; b < batch; ++b) {
for (int i = 0; i < oh; ++i) {
for (int j = 0; j < ow; ++j) {
for (int d = 0; d < od; ++d) {
int idx = b * (oh * ow * od) +
i * (ow * od) +
j * od +
d;
float t = in_layer[idx];
result[idx] = t < 0 ? 0.1 * t : t;
}
}
}
}
}
} // extern C
|
3a379917b195e4e3effb9069c4d0cabf020ff8ae.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <raft/random/rng.cuh>
#include "../test_utils.h"
#include <raft/sparse/op/sort.h>
#include <raft/mr/device/allocator.hpp>
#include <raft/sparse/coo.cuh>
#include <raft/sparse/op/filter.cuh>
#include <iostream>
namespace raft {
namespace sparse {
template <typename T>
struct SparseFilterInputs {
int m, n, nnz;
unsigned long long int seed;
};
template <typename T>
class SparseFilterTests
: public ::testing::TestWithParam<SparseFilterInputs<T>> {
protected:
void SetUp() override {}
void TearDown() override {}
protected:
SparseFilterInputs<T> params;
};
const std::vector<SparseFilterInputs<float>> inputsf = {{5, 10, 5, 1234ULL}};
typedef SparseFilterTests<float> COORemoveZeros;
TEST_P(COORemoveZeros, Result) {
hipStream_t stream;
hipStreamCreate(&stream);
std::shared_ptr<raft::mr::device::allocator> alloc(
new raft::mr::device::default_allocator);
params = ::testing::TestWithParam<SparseFilterInputs<float>>::GetParam();
float *in_h_vals = new float[params.nnz];
COO<float> in(alloc, stream, params.nnz, 5, 5);
raft::random::Rng r(params.seed);
r.uniform(in.vals(), params.nnz, float(-1.0), float(1.0), stream);
raft::update_host(in_h_vals, in.vals(), params.nnz, stream);
in_h_vals[0] = 0;
in_h_vals[2] = 0;
in_h_vals[3] = 0;
int *in_h_rows = new int[params.nnz];
int *in_h_cols = new int[params.nnz];
for (int i = 0; i < params.nnz; i++) {
in_h_rows[i] = params.nnz - i - 1;
in_h_cols[i] = i;
}
raft::update_device(in.rows(), in_h_rows, params.nnz, stream);
raft::update_device(in.cols(), in_h_cols, params.nnz, stream);
raft::update_device(in.vals(), in_h_vals, params.nnz, stream);
op::coo_sort<float>(&in, alloc, stream);
int out_rows_ref_h[2] = {0, 3};
int out_cols_ref_h[2] = {4, 1};
float *out_vals_ref_h = (float *)malloc(2 * sizeof(float));
out_vals_ref_h[0] = in_h_vals[4];
out_vals_ref_h[1] = in_h_vals[1];
COO<float> out_ref(alloc, stream, 2, 5, 5);
COO<float> out(alloc, stream);
raft::update_device(out_ref.rows(), *&out_rows_ref_h, 2, stream);
raft::update_device(out_ref.cols(), *&out_cols_ref_h, 2, stream);
raft::update_device(out_ref.vals(), out_vals_ref_h, 2, stream);
op::coo_remove_zeros<32, float>(&in, &out, alloc, stream);
ASSERT_TRUE(raft::devArrMatch<int>(out_ref.rows(), out.rows(), 2,
raft::Compare<int>()));
ASSERT_TRUE(raft::devArrMatch<int>(out_ref.cols(), out.cols(), 2,
raft::Compare<int>()));
ASSERT_TRUE(raft::devArrMatch<float>(out_ref.vals(), out.vals(), 2,
raft::Compare<float>()));
CUDA_CHECK(hipStreamDestroy(stream));
free(out_vals_ref_h);
delete[] in_h_rows;
delete[] in_h_cols;
delete[] in_h_vals;
}
INSTANTIATE_TEST_CASE_P(SparseFilterTests, COORemoveZeros,
::testing::ValuesIn(inputsf));
} // namespace sparse
} // namespace raft
|
3a379917b195e4e3effb9069c4d0cabf020ff8ae.cu
|
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <raft/random/rng.cuh>
#include "../test_utils.h"
#include <raft/sparse/op/sort.h>
#include <raft/mr/device/allocator.hpp>
#include <raft/sparse/coo.cuh>
#include <raft/sparse/op/filter.cuh>
#include <iostream>
namespace raft {
namespace sparse {
template <typename T>
struct SparseFilterInputs {
int m, n, nnz;
unsigned long long int seed;
};
template <typename T>
class SparseFilterTests
: public ::testing::TestWithParam<SparseFilterInputs<T>> {
protected:
void SetUp() override {}
void TearDown() override {}
protected:
SparseFilterInputs<T> params;
};
const std::vector<SparseFilterInputs<float>> inputsf = {{5, 10, 5, 1234ULL}};
typedef SparseFilterTests<float> COORemoveZeros;
TEST_P(COORemoveZeros, Result) {
cudaStream_t stream;
  CUDA_CHECK(cudaStreamCreate(&stream));
std::shared_ptr<raft::mr::device::allocator> alloc(
new raft::mr::device::default_allocator);
params = ::testing::TestWithParam<SparseFilterInputs<float>>::GetParam();
float *in_h_vals = new float[params.nnz];
COO<float> in(alloc, stream, params.nnz, 5, 5);
raft::random::Rng r(params.seed);
r.uniform(in.vals(), params.nnz, float(-1.0), float(1.0), stream);
raft::update_host(in_h_vals, in.vals(), params.nnz, stream);
in_h_vals[0] = 0;
in_h_vals[2] = 0;
in_h_vals[3] = 0;
int *in_h_rows = new int[params.nnz];
int *in_h_cols = new int[params.nnz];
for (int i = 0; i < params.nnz; i++) {
in_h_rows[i] = params.nnz - i - 1;
in_h_cols[i] = i;
}
raft::update_device(in.rows(), in_h_rows, params.nnz, stream);
raft::update_device(in.cols(), in_h_cols, params.nnz, stream);
raft::update_device(in.vals(), in_h_vals, params.nnz, stream);
op::coo_sort<float>(&in, alloc, stream);
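  // After coo_sort the entries are (0,4), (1,3), (2,2), (3,1), (4,0),
  // carrying vals[4] down to vals[0]. vals[0], vals[2], and vals[3] were
  // zeroed above, so only rows {0, 3} with cols {4, 1} survive the filter.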
int out_rows_ref_h[2] = {0, 3};
int out_cols_ref_h[2] = {4, 1};
float *out_vals_ref_h = (float *)malloc(2 * sizeof(float));
out_vals_ref_h[0] = in_h_vals[4];
out_vals_ref_h[1] = in_h_vals[1];
COO<float> out_ref(alloc, stream, 2, 5, 5);
COO<float> out(alloc, stream);
  raft::update_device(out_ref.rows(), out_rows_ref_h, 2, stream);
  raft::update_device(out_ref.cols(), out_cols_ref_h, 2, stream);
raft::update_device(out_ref.vals(), out_vals_ref_h, 2, stream);
op::coo_remove_zeros<32, float>(&in, &out, alloc, stream);
ASSERT_TRUE(raft::devArrMatch<int>(out_ref.rows(), out.rows(), 2,
raft::Compare<int>()));
ASSERT_TRUE(raft::devArrMatch<int>(out_ref.cols(), out.cols(), 2,
raft::Compare<int>()));
ASSERT_TRUE(raft::devArrMatch<float>(out_ref.vals(), out.vals(), 2,
raft::Compare<float>()));
CUDA_CHECK(cudaStreamDestroy(stream));
free(out_vals_ref_h);
delete[] in_h_rows;
delete[] in_h_cols;
delete[] in_h_vals;
}
INSTANTIATE_TEST_CASE_P(SparseFilterTests, COORemoveZeros,
::testing::ValuesIn(inputsf));
} // namespace sparse
} // namespace raft
|
39f683737f4085da7613ad519219f712cd8da1c7.hip
|
// !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Copyright (c) 2022 NVIDIA Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/fluid/framework/scope_guard.h"
#include "paddle/fluid/operators/fused/fused_gemm_epilogue_op.h"
#include "paddle/fluid/platform/dynload/cublasLt.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
template <typename DeviceContext, typename T>
class FusedGemmEpilogueKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
const Tensor* x = ctx.Input<Tensor>("X");
const Tensor* y = ctx.Input<Tensor>("Y");
const Tensor* bias = ctx.Input<Tensor>("Bias");
Tensor* out = ctx.Output<Tensor>("Out");
Tensor* reserve_space = ctx.Output<Tensor>("ReserveSpace");
bool trans_x = ctx.Attr<bool>("trans_x");
bool trans_y = ctx.Attr<bool>("trans_y");
std::string activation = ctx.Attr<std::string>("activation");
VLOG(10) << "trans_x = " << trans_x << " , trans_y = " << trans_y
<< " , activation = " << activation;
    bool enable_auxiliary = (reserve_space != nullptr);
out->mutable_data<T>(ctx.GetPlace());
auto* out_data = out->data<T>();
auto x_mat_dims =
phi::flatten_to_2d(x->dims(), trans_x ? 1 : x->dims().size() - 1);
// (M * K) * (K * N)
int64_t M = trans_x ? x_mat_dims[1] : x_mat_dims[0];
int64_t K = trans_y ? y->dims()[1] : y->dims()[0];
int64_t N = trans_y ? y->dims()[0] : y->dims()[1];
hipDataType mat_type = HIP_R_32F;
hipDataType scale_type = HIP_R_32F;
hipblasComputeType_t compute_type = CUBLAS_COMPUTE_32F;
if (std::is_same<T, paddle::platform::float16>::value) {
mat_type = HIP_R_16F;
}
if (std::is_same<T, double>::value) {
mat_type = HIP_R_64F;
scale_type = HIP_R_64F;
compute_type = CUBLAS_COMPUTE_64F;
}
cublasLtMatmulDesc_t operation_desc = NULL;
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatmulDescCreate(
&operation_desc, compute_type, scale_type));
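    // cuBLASLt is column-major, so the row-major Out = op(X) * op(Y) is
    // computed as Out^T = op(Y)^T * op(X)^T: X supplies TRANSB, Y supplies
    // TRANSA, and the matmul call below passes y_data before x_data.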
hipblasOperation_t transx = trans_x ? HIPBLAS_OP_T : HIPBLAS_OP_N;
hipblasOperation_t transy = trans_y ? HIPBLAS_OP_T : HIPBLAS_OP_N;
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
operation_desc, CUBLASLT_MATMUL_DESC_TRANSB, &transx,
sizeof(transx)));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
operation_desc, CUBLASLT_MATMUL_DESC_TRANSA, &transy,
sizeof(transy)));
cublasLtEpilogue_t epiloque_func =
get_epilogue_type_(activation, enable_auxiliary);
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
operation_desc, CUBLASLT_MATMUL_DESC_EPILOGUE, &epiloque_func,
sizeof(epiloque_func)));
const T* bias_data = bias->data<T>();
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
operation_desc, CUBLASLT_MATMUL_DESC_BIAS_POINTER, &bias_data,
sizeof(bias_data)));
if (enable_auxiliary && activation != "none") {
size_t reserve_space_size = 0;
if (activation == "relu") {
        // The ReLU auxiliary mask stores one bit per output element.
reserve_space_size = phi::product(out->dims()) / 8;
} else {
reserve_space_size = phi::product(out->dims()) * sizeof(T);
}
reserve_space->mutable_data(ctx.GetPlace(), out->type(),
reserve_space_size);
void* aux_data = reinterpret_cast<void*>(reserve_space->data<T>());
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
operation_desc, CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER,
&aux_data, sizeof(aux_data)));
int64_t aux_ld = N;
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
operation_desc, CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_LD, &aux_ld,
sizeof(aux_ld)));
}
cublasLtMatrixLayout_t x_desc = NULL, y_desc = NULL, out_desc = NULL;
if (trans_x)
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
&x_desc, mat_type, M, K, M));
else
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
&x_desc, mat_type, K, M, K));
if (trans_y)
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
&y_desc, mat_type, K, N, K));
else
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
&y_desc, mat_type, N, K, N));
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
&out_desc, mat_type, N, M, N));
cublasLtHandle_t lt_handle = dev_ctx.cublaslt_handle();
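    // 4 GiB device workspace, shared by algorithm selection and the matmul.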
size_t workspace_size = static_cast<size_t>(4) * 1024 * 1024 * 1024;
hipStream_t stream = dev_ctx.stream();
memory::allocation::AllocationPtr workspace =
memory::Alloc(dev_ctx, workspace_size);
double alpha64 = 1.0, beta64 = 0.0;
float alpha32 = 1.0f, beta32 = 0.0f;
void *alpha = nullptr, *beta = nullptr;
if (std::is_same<T, double>::value) {
alpha = &alpha64;
beta = &beta64;
} else {
alpha = &alpha32;
beta = &beta32;
}
const auto* y_data = y->data<T>();
const auto* x_data = x->data<T>();
auto algo = GemmEpilogueAlgoCache::Instance().GetGemmAlgo(
lt_handle, operation_desc, y_desc, x_desc, out_desc, alpha, beta,
y_data, x_data, out_data, stream, workspace->ptr(), workspace_size);
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatmul(
lt_handle, operation_desc, alpha, y_data, y_desc, x_data, x_desc, beta,
out_data, out_desc, out_data, out_desc, algo, workspace->ptr(),
workspace_size, stream));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescDestroy(operation_desc));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatrixLayoutDestroy(y_desc));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatrixLayoutDestroy(x_desc));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatrixLayoutDestroy(out_desc));
}
private:
static cublasLtEpilogue_t get_epilogue_type_(const std::string& activation,
bool enable_auxiliary) {
if (activation == "relu") {
return enable_auxiliary ? CUBLASLT_EPILOGUE_RELU_AUX_BIAS
: CUBLASLT_EPILOGUE_RELU_BIAS;
} else if (activation == "gelu") {
return enable_auxiliary ? CUBLASLT_EPILOGUE_GELU_AUX_BIAS
: CUBLASLT_EPILOGUE_GELU_BIAS;
} else if (activation == "none") {
return CUBLASLT_EPILOGUE_BIAS;
} else {
PADDLE_ENFORCE_EQ(
true, false,
platform::errors::InvalidArgument(
"The activation attribute of fused_gemm_epilogue op should be"
" one of {\"none\", \"relu\", \"gelu\"}. But received %s."
"But received activation=%s.",
activation));
}
}
};
enum FusedGEMMGradInType { kDX = 0, kDY = 1, kDZ = 2 };
template <bool TransX, bool TransY>
struct FusedGEMMGradTrait;
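// Each specialization encodes, for a given (TransX, TransY) pair, which
// inputs (kDX = X, kDY = Y, kDZ = dOut) serve as the A and B operands of the
// dX and dY matmuls, and whether each operand is transposed.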
template <>
struct FusedGEMMGradTrait<false, false> {
static constexpr auto kXGradA = FusedGEMMGradInType::kDZ;
static constexpr auto kXGradB = FusedGEMMGradInType::kDY;
static constexpr auto kXGradATrans = false;
static constexpr auto kXGradBTrans = true;
static constexpr auto kYGradA = FusedGEMMGradInType::kDX;
static constexpr auto kYGradB = FusedGEMMGradInType::kDZ;
static constexpr auto kYGradATrans = true;
static constexpr auto kYGradBTrans = false;
};
template <>
struct FusedGEMMGradTrait<true, false> {
static constexpr auto kXGradA = FusedGEMMGradInType::kDY;
static constexpr auto kXGradB = FusedGEMMGradInType::kDZ;
static constexpr auto kXGradATrans = false;
static constexpr auto kXGradBTrans = true;
static constexpr auto kYGradA = FusedGEMMGradInType::kDX;
static constexpr auto kYGradB = FusedGEMMGradInType::kDZ;
static constexpr auto kYGradATrans = false;
static constexpr auto kYGradBTrans = false;
};
template <>
struct FusedGEMMGradTrait<false, true> {
static constexpr auto kXGradA = FusedGEMMGradInType::kDZ;
static constexpr auto kXGradB = FusedGEMMGradInType::kDY;
static constexpr auto kXGradATrans = false;
static constexpr auto kXGradBTrans = false;
static constexpr auto kYGradA = FusedGEMMGradInType::kDZ;
static constexpr auto kYGradB = FusedGEMMGradInType::kDX;
static constexpr auto kYGradATrans = true;
static constexpr auto kYGradBTrans = false;
};
template <>
struct FusedGEMMGradTrait<true, true> {
static constexpr auto kXGradA = FusedGEMMGradInType::kDY;
static constexpr auto kXGradB = FusedGEMMGradInType::kDZ;
static constexpr auto kXGradATrans = true;
static constexpr auto kXGradBTrans = true;
static constexpr auto kYGradA = FusedGEMMGradInType::kDZ;
static constexpr auto kYGradB = FusedGEMMGradInType::kDX;
static constexpr auto kYGradATrans = true;
static constexpr auto kYGradBTrans = true;
};
static constexpr auto BoolToCuBlasEnum(bool transpose) {
return transpose ? HIPBLAS_OP_T : HIPBLAS_OP_N;
}
template <typename DeviceContext, typename T>
class FusedGemmEpilogueGradKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
bool transpose_x = ctx.Attr<bool>("trans_x");
bool transpose_y = ctx.Attr<bool>("trans_y");
if (transpose_x) {
if (transpose_y) {
ComputeImpl<true, true>(ctx);
} else {
ComputeImpl<true, false>(ctx);
}
} else {
if (transpose_y) {
ComputeImpl<false, true>(ctx);
} else {
ComputeImpl<false, false>(ctx);
}
}
}
private:
template <bool TransX, bool TransY>
static void ComputeImpl(const framework::ExecutionContext& ctx) {
using Trait = FusedGEMMGradTrait<TransX, TransY>;
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
const Tensor* dout = ctx.Input<Tensor>("DOut");
const Tensor* x = ctx.Input<Tensor>("X");
const Tensor* y = ctx.Input<Tensor>("Y");
const Tensor* reserve_space = ctx.Input<Tensor>("ReserveSpace");
Tensor* dx = ctx.Output<Tensor>("DX");
Tensor* dy = ctx.Output<Tensor>("DY");
Tensor* dbias = ctx.Output<Tensor>("DBias");
std::string activation_grad = ctx.Attr<std::string>("activation_grad");
VLOG(10) << "trans_x = " << TransX << " , trans_y = " << TransY
<< " , activation_grad = " << activation_grad;
auto x_mat_dims =
phi::flatten_to_2d(x->dims(), TransX ? 1 : x->dims().size() - 1);
// (M * K) * (K * N)
int64_t M = TransX ? x_mat_dims[1] : x_mat_dims[0];
int64_t K = TransY ? y->dims()[1] : y->dims()[0];
int64_t N = TransY ? y->dims()[0] : y->dims()[1];
VLOG(10) << "M = " << M << " , K = " << K << " , N = " << N;
hipDataType mat_type = HIP_R_32F;
hipDataType scale_type = HIP_R_32F;
hipblasComputeType_t compute_type = CUBLAS_COMPUTE_32F;
if (std::is_same<T, paddle::platform::float16>::value) {
mat_type = HIP_R_16F;
}
if (std::is_same<T, double>::value) {
mat_type = HIP_R_64F;
scale_type = HIP_R_64F;
compute_type = CUBLAS_COMPUTE_64F;
}
cublasLtHandle_t lt_handle = dev_ctx.cublaslt_handle();
size_t workspace_size = static_cast<size_t>(4) * 1024 * 1024 * 1024;
const cublasLtMatmulAlgo_t* algo = nullptr;
hipStream_t stream = dev_ctx.stream();
double alpha64 = 1.0, beta64 = 0.0;
float alpha32 = 1.0f, beta32 = 0.0f;
void *alpha = nullptr, *beta = nullptr;
if (std::is_same<T, double>::value) {
alpha = &alpha64;
beta = &beta64;
} else {
alpha = &alpha32;
beta = &beta32;
}
cublasLtMatrixLayout_t dout_desc = nullptr, dout_trans_desc = nullptr;
cublasLtMatrixLayout_t x_desc = nullptr, x_trans_desc = nullptr;
cublasLtMatrixLayout_t y_desc = nullptr, y_trans_desc = nullptr;
cublasLtMatrixLayout_t dx_desc = nullptr, dy_desc = nullptr;
cublasLtMatmulDesc_t dx_operation_desc = nullptr,
dy_operation_desc = nullptr;
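    // Destroy whichever cuBLASLt descriptors get created below when this
    // scope exits, regardless of which branches ran.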
DEFINE_PADDLE_SCOPE_GUARD([&] {
auto descs = {dout_desc, dout_trans_desc, x_desc, x_trans_desc,
y_desc, y_trans_desc, dx_desc, dy_desc};
for (auto desc : descs) {
if (desc) {
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatrixLayoutDestroy(desc));
}
}
if (dx_operation_desc) {
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescDestroy(dx_operation_desc));
}
if (dy_operation_desc) {
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescDestroy(dy_operation_desc));
}
});
auto x_row = TransX ? K : M;
auto x_col = TransX ? M : K;
auto y_row = TransY ? N : K;
auto y_col = TransY ? K : N;
auto z_row = TransX ? N : M;
auto z_col = TransX ? M : N;
// dx = func(dout, y)
if (dx) {
constexpr auto kXGradAIsDZ = (Trait::kXGradA == FusedGEMMGradInType::kDZ);
cublasLtMatrixLayout_t *dx_dout_desc, *dx_y_desc;
if (TransX) {
dx_dout_desc = &dout_trans_desc;
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatrixLayoutCreate(
dx_dout_desc, mat_type, z_row, z_col, z_row));
} else {
dx_dout_desc = &dout_desc;
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatrixLayoutCreate(
dx_dout_desc, mat_type, z_col, z_row, z_col));
}
dx_y_desc = &y_trans_desc;
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
dx_y_desc, mat_type, y_col, y_row, y_col));
auto& a_desc = kXGradAIsDZ ? (*dx_dout_desc) : (*dx_y_desc);
auto& b_desc = kXGradAIsDZ ? (*dx_y_desc) : (*dx_dout_desc);
auto a_trans = BoolToCuBlasEnum(Trait::kXGradATrans);
auto b_trans = BoolToCuBlasEnum(Trait::kXGradBTrans);
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
&dx_desc, mat_type, x_col, x_row, x_col));
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatmulDescCreate(
&dx_operation_desc, compute_type, scale_type));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dx_operation_desc, CUBLASLT_MATMUL_DESC_TRANSB, &a_trans,
sizeof(a_trans)));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dx_operation_desc, CUBLASLT_MATMUL_DESC_TRANSA, &b_trans,
sizeof(b_trans)));
cublasLtEpilogue_t epiloque_func_for_dx =
get_epilogue_type_(activation_grad);
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dx_operation_desc, CUBLASLT_MATMUL_DESC_EPILOGUE,
&epiloque_func_for_dx, sizeof(epiloque_func_for_dx)));
if (activation_grad != "none") {
auto* aux_data = reserve_space->data<T>();
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dx_operation_desc, CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER,
&aux_data, sizeof(aux_data)));
int64_t aux_ld = TransX ? M : K;
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dx_operation_desc, CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_LD,
&aux_ld, sizeof(aux_ld)));
}
auto dx_workspace = memory::Alloc(dev_ctx, workspace_size);
auto* dx_data = dx->mutable_data<T>(ctx.GetPlace());
const auto* y_data = y->data<T>();
const auto* dout_data = dout->data<T>();
const auto* a_data = kXGradAIsDZ ? dout_data : y_data;
const auto* b_data = kXGradAIsDZ ? y_data : dout_data;
auto algo = GemmEpilogueAlgoCache::Instance().GetGemmAlgo(
lt_handle, dx_operation_desc, b_desc, a_desc, dx_desc, alpha, beta,
b_data, a_data, dx_data, stream, dx_workspace->ptr(), workspace_size);
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatmul(
lt_handle, dx_operation_desc, alpha, b_data, b_desc, a_data, a_desc,
beta, dx_data, dx_desc, dx_data, dx_desc, algo, dx_workspace->ptr(),
workspace_size, stream));
}
// dy = func(dout, x)
if (dy) {
constexpr auto kYGradAIsDZ = (Trait::kYGradA == FusedGEMMGradInType::kDZ);
cublasLtMatrixLayout_t *dy_dout_desc = nullptr, *dy_x_desc = nullptr;
if (TransX) {
dy_dout_desc = &dout_trans_desc;
if (dout_trans_desc == nullptr) {
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatrixLayoutCreate(
dy_dout_desc, mat_type, z_row, z_col, z_row));
}
} else {
dy_dout_desc = &dout_desc;
if (dout_desc == nullptr) {
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatrixLayoutCreate(
dy_dout_desc, mat_type, z_col, z_row, z_col));
}
}
dy_x_desc = &x_trans_desc;
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
dy_x_desc, mat_type, x_col, x_row, x_col));
auto& a_desc = kYGradAIsDZ ? (*dy_dout_desc) : (*dy_x_desc);
auto& b_desc = kYGradAIsDZ ? (*dy_x_desc) : (*dy_dout_desc);
auto a_trans = BoolToCuBlasEnum(Trait::kYGradATrans);
auto b_trans = BoolToCuBlasEnum(Trait::kYGradBTrans);
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
&dy_desc, mat_type, y_col, y_row, y_col));
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatmulDescCreate(
&dy_operation_desc, compute_type, scale_type));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dy_operation_desc, CUBLASLT_MATMUL_DESC_TRANSB, &a_trans,
sizeof(a_trans)));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dy_operation_desc, CUBLASLT_MATMUL_DESC_TRANSA, &b_trans,
sizeof(b_trans)));
cublasLtEpilogue_t epiloque_func_for_dy;
if (dbias == nullptr) {
epiloque_func_for_dy = CUBLASLT_EPILOGUE_DEFAULT;
} else {
if (TransY) {
epiloque_func_for_dy = CUBLASLT_EPILOGUE_BGRADB;
} else {
epiloque_func_for_dy = CUBLASLT_EPILOGUE_BGRADA;
}
}
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dy_operation_desc, CUBLASLT_MATMUL_DESC_EPILOGUE,
&epiloque_func_for_dy, sizeof(epiloque_func_for_dy)));
if (dbias) {
auto* dbias_data = dbias->mutable_data<T>(ctx.GetPlace());
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dy_operation_desc, CUBLASLT_MATMUL_DESC_BIAS_POINTER,
&dbias_data, sizeof(dbias_data)));
}
auto dy_workspace = memory::Alloc(dev_ctx, workspace_size);
auto* dy_data = dy->mutable_data<T>(ctx.GetPlace());
const auto* dout_data = dout->data<T>();
const auto* x_data = x->data<T>();
const auto* a_data = kYGradAIsDZ ? dout_data : x_data;
const auto* b_data = kYGradAIsDZ ? x_data : dout_data;
auto algo = GemmEpilogueAlgoCache::Instance().GetGemmAlgo(
lt_handle, dy_operation_desc, b_desc, a_desc, dy_desc, alpha, beta,
b_data, a_data, dy_data, stream, dy_workspace->ptr(), workspace_size);
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatmul(
lt_handle, dy_operation_desc, alpha, b_data, b_desc, a_data, a_desc,
beta, dy_data, dy_desc, dy_data, dy_desc, algo, dy_workspace->ptr(),
workspace_size, stream));
}
}
private:
static cublasLtEpilogue_t get_epilogue_type_(
const std::string& activation_grad) {
if (activation_grad == "relu_grad") {
return CUBLASLT_EPILOGUE_DRELU;
} else if (activation_grad == "gelu_grad") {
return CUBLASLT_EPILOGUE_DGELU;
} else if (activation_grad == "none") {
return CUBLASLT_EPILOGUE_DEFAULT;
} else {
PADDLE_ENFORCE_EQ(
true, false,
platform::errors::InvalidArgument(
"The activation_grad attribute of fused_gemm_epilogue op should "
"be"
" one of {\"none\", \"relu\", \"gelu\"}. But received %s."
"But received activation_grad=%s.",
activation_grad));
}
}
};
} // namespace operators
} // namespace paddle
#if TORCH_HIP_VERSION >= 11060
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
fused_gemm_epilogue,
ops::FusedGemmEpilogueKernel<paddle::platform::CUDADeviceContext, float>,
ops::FusedGemmEpilogueKernel<paddle::platform::CUDADeviceContext, double>,
ops::FusedGemmEpilogueKernel<paddle::platform::CUDADeviceContext,
paddle::platform::float16>);
REGISTER_OP_CUDA_KERNEL(
fused_gemm_epilogue_grad,
ops::FusedGemmEpilogueGradKernel<paddle::platform::CUDADeviceContext,
float>,
ops::FusedGemmEpilogueGradKernel<paddle::platform::CUDADeviceContext,
double>,
ops::FusedGemmEpilogueGradKernel<paddle::platform::CUDADeviceContext,
paddle::platform::float16>);
#endif
|
39f683737f4085da7613ad519219f712cd8da1c7.cu
|
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Copyright (c) 2022 NVIDIA Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/fluid/framework/scope_guard.h"
#include "paddle/fluid/operators/fused/fused_gemm_epilogue_op.h"
#include "paddle/fluid/platform/dynload/cublasLt.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
template <typename DeviceContext, typename T>
class FusedGemmEpilogueKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
const Tensor* x = ctx.Input<Tensor>("X");
const Tensor* y = ctx.Input<Tensor>("Y");
const Tensor* bias = ctx.Input<Tensor>("Bias");
Tensor* out = ctx.Output<Tensor>("Out");
Tensor* reserve_space = ctx.Output<Tensor>("ReserveSpace");
bool trans_x = ctx.Attr<bool>("trans_x");
bool trans_y = ctx.Attr<bool>("trans_y");
std::string activation = ctx.Attr<std::string>("activation");
VLOG(10) << "trans_x = " << trans_x << " , trans_y = " << trans_y
<< " , activation = " << activation;
    bool enable_auxiliary = (reserve_space != nullptr);
out->mutable_data<T>(ctx.GetPlace());
auto* out_data = out->data<T>();
auto x_mat_dims =
phi::flatten_to_2d(x->dims(), trans_x ? 1 : x->dims().size() - 1);
// (M * K) * (K * N)
int64_t M = trans_x ? x_mat_dims[1] : x_mat_dims[0];
int64_t K = trans_y ? y->dims()[1] : y->dims()[0];
int64_t N = trans_y ? y->dims()[0] : y->dims()[1];
cudaDataType_t mat_type = CUDA_R_32F;
cudaDataType_t scale_type = CUDA_R_32F;
cublasComputeType_t compute_type = CUBLAS_COMPUTE_32F;
if (std::is_same<T, paddle::platform::float16>::value) {
mat_type = CUDA_R_16F;
}
if (std::is_same<T, double>::value) {
mat_type = CUDA_R_64F;
scale_type = CUDA_R_64F;
compute_type = CUBLAS_COMPUTE_64F;
}
cublasLtMatmulDesc_t operation_desc = NULL;
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatmulDescCreate(
&operation_desc, compute_type, scale_type));
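    // cuBLASLt is column-major, so the row-major Out = op(X) * op(Y) is
    // computed as Out^T = op(Y)^T * op(X)^T: X supplies TRANSB, Y supplies
    // TRANSA, and the matmul call below passes y_data before x_data.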
cublasOperation_t transx = trans_x ? CUBLAS_OP_T : CUBLAS_OP_N;
cublasOperation_t transy = trans_y ? CUBLAS_OP_T : CUBLAS_OP_N;
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
operation_desc, CUBLASLT_MATMUL_DESC_TRANSB, &transx,
sizeof(transx)));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
operation_desc, CUBLASLT_MATMUL_DESC_TRANSA, &transy,
sizeof(transy)));
cublasLtEpilogue_t epiloque_func =
get_epilogue_type_(activation, enable_auxiliary);
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
operation_desc, CUBLASLT_MATMUL_DESC_EPILOGUE, &epiloque_func,
sizeof(epiloque_func)));
const T* bias_data = bias->data<T>();
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
operation_desc, CUBLASLT_MATMUL_DESC_BIAS_POINTER, &bias_data,
sizeof(bias_data)));
if (enable_auxiliary && activation != "none") {
size_t reserve_space_size = 0;
if (activation == "relu") {
        // The ReLU auxiliary mask stores one bit per output element.
reserve_space_size = phi::product(out->dims()) / 8;
} else {
reserve_space_size = phi::product(out->dims()) * sizeof(T);
}
reserve_space->mutable_data(ctx.GetPlace(), out->type(),
reserve_space_size);
void* aux_data = reinterpret_cast<void*>(reserve_space->data<T>());
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
operation_desc, CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER,
&aux_data, sizeof(aux_data)));
int64_t aux_ld = N;
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
operation_desc, CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_LD, &aux_ld,
sizeof(aux_ld)));
}
cublasLtMatrixLayout_t x_desc = NULL, y_desc = NULL, out_desc = NULL;
if (trans_x)
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
&x_desc, mat_type, M, K, M));
else
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
&x_desc, mat_type, K, M, K));
if (trans_y)
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
&y_desc, mat_type, K, N, K));
else
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
&y_desc, mat_type, N, K, N));
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
&out_desc, mat_type, N, M, N));
cublasLtHandle_t lt_handle = dev_ctx.cublaslt_handle();
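    // 4 GiB device workspace, shared by algorithm selection and the matmul.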
size_t workspace_size = static_cast<size_t>(4) * 1024 * 1024 * 1024;
cudaStream_t stream = dev_ctx.stream();
memory::allocation::AllocationPtr workspace =
memory::Alloc(dev_ctx, workspace_size);
double alpha64 = 1.0, beta64 = 0.0;
float alpha32 = 1.0f, beta32 = 0.0f;
void *alpha = nullptr, *beta = nullptr;
if (std::is_same<T, double>::value) {
alpha = &alpha64;
beta = &beta64;
} else {
alpha = &alpha32;
beta = &beta32;
}
const auto* y_data = y->data<T>();
const auto* x_data = x->data<T>();
auto algo = GemmEpilogueAlgoCache::Instance().GetGemmAlgo(
lt_handle, operation_desc, y_desc, x_desc, out_desc, alpha, beta,
y_data, x_data, out_data, stream, workspace->ptr(), workspace_size);
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatmul(
lt_handle, operation_desc, alpha, y_data, y_desc, x_data, x_desc, beta,
out_data, out_desc, out_data, out_desc, algo, workspace->ptr(),
workspace_size, stream));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescDestroy(operation_desc));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatrixLayoutDestroy(y_desc));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatrixLayoutDestroy(x_desc));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatrixLayoutDestroy(out_desc));
}
private:
static cublasLtEpilogue_t get_epilogue_type_(const std::string& activation,
bool enable_auxiliary) {
if (activation == "relu") {
return enable_auxiliary ? CUBLASLT_EPILOGUE_RELU_AUX_BIAS
: CUBLASLT_EPILOGUE_RELU_BIAS;
} else if (activation == "gelu") {
return enable_auxiliary ? CUBLASLT_EPILOGUE_GELU_AUX_BIAS
: CUBLASLT_EPILOGUE_GELU_BIAS;
} else if (activation == "none") {
return CUBLASLT_EPILOGUE_BIAS;
} else {
PADDLE_ENFORCE_EQ(
true, false,
platform::errors::InvalidArgument(
"The activation attribute of fused_gemm_epilogue op should be"
" one of {\"none\", \"relu\", \"gelu\"}. But received %s."
"But received activation=%s.",
activation));
}
}
};
enum FusedGEMMGradInType { kDX = 0, kDY = 1, kDZ = 2 };
template <bool TransX, bool TransY>
struct FusedGEMMGradTrait;
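// Each specialization encodes, for a given (TransX, TransY) pair, which
// inputs (kDX = X, kDY = Y, kDZ = dOut) serve as the A and B operands of the
// dX and dY matmuls, and whether each operand is transposed.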
template <>
struct FusedGEMMGradTrait<false, false> {
static constexpr auto kXGradA = FusedGEMMGradInType::kDZ;
static constexpr auto kXGradB = FusedGEMMGradInType::kDY;
static constexpr auto kXGradATrans = false;
static constexpr auto kXGradBTrans = true;
static constexpr auto kYGradA = FusedGEMMGradInType::kDX;
static constexpr auto kYGradB = FusedGEMMGradInType::kDZ;
static constexpr auto kYGradATrans = true;
static constexpr auto kYGradBTrans = false;
};
template <>
struct FusedGEMMGradTrait<true, false> {
static constexpr auto kXGradA = FusedGEMMGradInType::kDY;
static constexpr auto kXGradB = FusedGEMMGradInType::kDZ;
static constexpr auto kXGradATrans = false;
static constexpr auto kXGradBTrans = true;
static constexpr auto kYGradA = FusedGEMMGradInType::kDX;
static constexpr auto kYGradB = FusedGEMMGradInType::kDZ;
static constexpr auto kYGradATrans = false;
static constexpr auto kYGradBTrans = false;
};
template <>
struct FusedGEMMGradTrait<false, true> {
static constexpr auto kXGradA = FusedGEMMGradInType::kDZ;
static constexpr auto kXGradB = FusedGEMMGradInType::kDY;
static constexpr auto kXGradATrans = false;
static constexpr auto kXGradBTrans = false;
static constexpr auto kYGradA = FusedGEMMGradInType::kDZ;
static constexpr auto kYGradB = FusedGEMMGradInType::kDX;
static constexpr auto kYGradATrans = true;
static constexpr auto kYGradBTrans = false;
};
template <>
struct FusedGEMMGradTrait<true, true> {
static constexpr auto kXGradA = FusedGEMMGradInType::kDY;
static constexpr auto kXGradB = FusedGEMMGradInType::kDZ;
static constexpr auto kXGradATrans = true;
static constexpr auto kXGradBTrans = true;
static constexpr auto kYGradA = FusedGEMMGradInType::kDZ;
static constexpr auto kYGradB = FusedGEMMGradInType::kDX;
static constexpr auto kYGradATrans = true;
static constexpr auto kYGradBTrans = true;
};
static constexpr auto BoolToCuBlasEnum(bool transpose) {
return transpose ? CUBLAS_OP_T : CUBLAS_OP_N;
}
template <typename DeviceContext, typename T>
class FusedGemmEpilogueGradKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
bool transpose_x = ctx.Attr<bool>("trans_x");
bool transpose_y = ctx.Attr<bool>("trans_y");
if (transpose_x) {
if (transpose_y) {
ComputeImpl<true, true>(ctx);
} else {
ComputeImpl<true, false>(ctx);
}
} else {
if (transpose_y) {
ComputeImpl<false, true>(ctx);
} else {
ComputeImpl<false, false>(ctx);
}
}
}
private:
template <bool TransX, bool TransY>
static void ComputeImpl(const framework::ExecutionContext& ctx) {
using Trait = FusedGEMMGradTrait<TransX, TransY>;
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
const Tensor* dout = ctx.Input<Tensor>("DOut");
const Tensor* x = ctx.Input<Tensor>("X");
const Tensor* y = ctx.Input<Tensor>("Y");
const Tensor* reserve_space = ctx.Input<Tensor>("ReserveSpace");
Tensor* dx = ctx.Output<Tensor>("DX");
Tensor* dy = ctx.Output<Tensor>("DY");
Tensor* dbias = ctx.Output<Tensor>("DBias");
std::string activation_grad = ctx.Attr<std::string>("activation_grad");
VLOG(10) << "trans_x = " << TransX << " , trans_y = " << TransY
<< " , activation_grad = " << activation_grad;
auto x_mat_dims =
phi::flatten_to_2d(x->dims(), TransX ? 1 : x->dims().size() - 1);
// (M * K) * (K * N)
int64_t M = TransX ? x_mat_dims[1] : x_mat_dims[0];
int64_t K = TransY ? y->dims()[1] : y->dims()[0];
int64_t N = TransY ? y->dims()[0] : y->dims()[1];
VLOG(10) << "M = " << M << " , K = " << K << " , N = " << N;
cudaDataType_t mat_type = CUDA_R_32F;
cudaDataType_t scale_type = CUDA_R_32F;
cublasComputeType_t compute_type = CUBLAS_COMPUTE_32F;
if (std::is_same<T, paddle::platform::float16>::value) {
mat_type = CUDA_R_16F;
}
if (std::is_same<T, double>::value) {
mat_type = CUDA_R_64F;
scale_type = CUDA_R_64F;
compute_type = CUBLAS_COMPUTE_64F;
}
cublasLtHandle_t lt_handle = dev_ctx.cublaslt_handle();
size_t workspace_size = static_cast<size_t>(4) * 1024 * 1024 * 1024;
const cublasLtMatmulAlgo_t* algo = nullptr;
cudaStream_t stream = dev_ctx.stream();
double alpha64 = 1.0, beta64 = 0.0;
float alpha32 = 1.0f, beta32 = 0.0f;
void *alpha = nullptr, *beta = nullptr;
if (std::is_same<T, double>::value) {
alpha = &alpha64;
beta = &beta64;
} else {
alpha = &alpha32;
beta = &beta32;
}
cublasLtMatrixLayout_t dout_desc = nullptr, dout_trans_desc = nullptr;
cublasLtMatrixLayout_t x_desc = nullptr, x_trans_desc = nullptr;
cublasLtMatrixLayout_t y_desc = nullptr, y_trans_desc = nullptr;
cublasLtMatrixLayout_t dx_desc = nullptr, dy_desc = nullptr;
cublasLtMatmulDesc_t dx_operation_desc = nullptr,
dy_operation_desc = nullptr;
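    // Destroy whichever cuBLASLt descriptors get created below when this
    // scope exits, regardless of which branches ran.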
DEFINE_PADDLE_SCOPE_GUARD([&] {
auto descs = {dout_desc, dout_trans_desc, x_desc, x_trans_desc,
y_desc, y_trans_desc, dx_desc, dy_desc};
for (auto desc : descs) {
if (desc) {
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatrixLayoutDestroy(desc));
}
}
if (dx_operation_desc) {
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescDestroy(dx_operation_desc));
}
if (dy_operation_desc) {
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescDestroy(dy_operation_desc));
}
});
auto x_row = TransX ? K : M;
auto x_col = TransX ? M : K;
auto y_row = TransY ? N : K;
auto y_col = TransY ? K : N;
auto z_row = TransX ? N : M;
auto z_col = TransX ? M : N;
// dx = func(dout, y)
if (dx) {
constexpr auto kXGradAIsDZ = (Trait::kXGradA == FusedGEMMGradInType::kDZ);
cublasLtMatrixLayout_t *dx_dout_desc, *dx_y_desc;
if (TransX) {
dx_dout_desc = &dout_trans_desc;
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatrixLayoutCreate(
dx_dout_desc, mat_type, z_row, z_col, z_row));
} else {
dx_dout_desc = &dout_desc;
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatrixLayoutCreate(
dx_dout_desc, mat_type, z_col, z_row, z_col));
}
dx_y_desc = &y_trans_desc;
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
dx_y_desc, mat_type, y_col, y_row, y_col));
auto& a_desc = kXGradAIsDZ ? (*dx_dout_desc) : (*dx_y_desc);
auto& b_desc = kXGradAIsDZ ? (*dx_y_desc) : (*dx_dout_desc);
auto a_trans = BoolToCuBlasEnum(Trait::kXGradATrans);
auto b_trans = BoolToCuBlasEnum(Trait::kXGradBTrans);
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
&dx_desc, mat_type, x_col, x_row, x_col));
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatmulDescCreate(
&dx_operation_desc, compute_type, scale_type));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dx_operation_desc, CUBLASLT_MATMUL_DESC_TRANSB, &a_trans,
sizeof(a_trans)));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dx_operation_desc, CUBLASLT_MATMUL_DESC_TRANSA, &b_trans,
sizeof(b_trans)));
cublasLtEpilogue_t epiloque_func_for_dx =
get_epilogue_type_(activation_grad);
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dx_operation_desc, CUBLASLT_MATMUL_DESC_EPILOGUE,
&epiloque_func_for_dx, sizeof(epiloque_func_for_dx)));
if (activation_grad != "none") {
auto* aux_data = reserve_space->data<T>();
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dx_operation_desc, CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER,
&aux_data, sizeof(aux_data)));
int64_t aux_ld = TransX ? M : K;
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dx_operation_desc, CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_LD,
&aux_ld, sizeof(aux_ld)));
}
auto dx_workspace = memory::Alloc(dev_ctx, workspace_size);
auto* dx_data = dx->mutable_data<T>(ctx.GetPlace());
const auto* y_data = y->data<T>();
const auto* dout_data = dout->data<T>();
const auto* a_data = kXGradAIsDZ ? dout_data : y_data;
const auto* b_data = kXGradAIsDZ ? y_data : dout_data;
auto algo = GemmEpilogueAlgoCache::Instance().GetGemmAlgo(
lt_handle, dx_operation_desc, b_desc, a_desc, dx_desc, alpha, beta,
b_data, a_data, dx_data, stream, dx_workspace->ptr(), workspace_size);
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatmul(
lt_handle, dx_operation_desc, alpha, b_data, b_desc, a_data, a_desc,
beta, dx_data, dx_desc, dx_data, dx_desc, algo, dx_workspace->ptr(),
workspace_size, stream));
}
// dy = func(dout, x)
if (dy) {
constexpr auto kYGradAIsDZ = (Trait::kYGradA == FusedGEMMGradInType::kDZ);
cublasLtMatrixLayout_t *dy_dout_desc = nullptr, *dy_x_desc = nullptr;
if (TransX) {
dy_dout_desc = &dout_trans_desc;
if (dout_trans_desc == nullptr) {
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatrixLayoutCreate(
dy_dout_desc, mat_type, z_row, z_col, z_row));
}
} else {
dy_dout_desc = &dout_desc;
if (dout_desc == nullptr) {
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatrixLayoutCreate(
dy_dout_desc, mat_type, z_col, z_row, z_col));
}
}
dy_x_desc = &x_trans_desc;
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
dy_x_desc, mat_type, x_col, x_row, x_col));
auto& a_desc = kYGradAIsDZ ? (*dy_dout_desc) : (*dy_x_desc);
auto& b_desc = kYGradAIsDZ ? (*dy_x_desc) : (*dy_dout_desc);
auto a_trans = BoolToCuBlasEnum(Trait::kYGradATrans);
auto b_trans = BoolToCuBlasEnum(Trait::kYGradBTrans);
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
&dy_desc, mat_type, y_col, y_row, y_col));
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatmulDescCreate(
&dy_operation_desc, compute_type, scale_type));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dy_operation_desc, CUBLASLT_MATMUL_DESC_TRANSB, &a_trans,
sizeof(a_trans)));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dy_operation_desc, CUBLASLT_MATMUL_DESC_TRANSA, &b_trans,
sizeof(b_trans)));
cublasLtEpilogue_t epiloque_func_for_dy;
if (dbias == nullptr) {
epiloque_func_for_dy = CUBLASLT_EPILOGUE_DEFAULT;
} else {
if (TransY) {
epiloque_func_for_dy = CUBLASLT_EPILOGUE_BGRADB;
} else {
epiloque_func_for_dy = CUBLASLT_EPILOGUE_BGRADA;
}
}
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dy_operation_desc, CUBLASLT_MATMUL_DESC_EPILOGUE,
&epiloque_func_for_dy, sizeof(epiloque_func_for_dy)));
if (dbias) {
auto* dbias_data = dbias->mutable_data<T>(ctx.GetPlace());
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dy_operation_desc, CUBLASLT_MATMUL_DESC_BIAS_POINTER,
&dbias_data, sizeof(dbias_data)));
}
auto dy_workspace = memory::Alloc(dev_ctx, workspace_size);
auto* dy_data = dy->mutable_data<T>(ctx.GetPlace());
const auto* dout_data = dout->data<T>();
const auto* x_data = x->data<T>();
const auto* a_data = kYGradAIsDZ ? dout_data : x_data;
const auto* b_data = kYGradAIsDZ ? x_data : dout_data;
auto algo = GemmEpilogueAlgoCache::Instance().GetGemmAlgo(
lt_handle, dy_operation_desc, b_desc, a_desc, dy_desc, alpha, beta,
b_data, a_data, dy_data, stream, dy_workspace->ptr(), workspace_size);
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatmul(
lt_handle, dy_operation_desc, alpha, b_data, b_desc, a_data, a_desc,
beta, dy_data, dy_desc, dy_data, dy_desc, algo, dy_workspace->ptr(),
workspace_size, stream));
}
}
private:
static cublasLtEpilogue_t get_epilogue_type_(
const std::string& activation_grad) {
if (activation_grad == "relu_grad") {
return CUBLASLT_EPILOGUE_DRELU;
} else if (activation_grad == "gelu_grad") {
return CUBLASLT_EPILOGUE_DGELU;
} else if (activation_grad == "none") {
return CUBLASLT_EPILOGUE_DEFAULT;
} else {
PADDLE_ENFORCE_EQ(
true, false,
platform::errors::InvalidArgument(
"The activation_grad attribute of fused_gemm_epilogue op should "
"be"
" one of {\"none\", \"relu\", \"gelu\"}. But received %s."
"But received activation_grad=%s.",
activation_grad));
}
}
};
} // namespace operators
} // namespace paddle
#if CUDA_VERSION >= 11060
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
fused_gemm_epilogue,
ops::FusedGemmEpilogueKernel<paddle::platform::CUDADeviceContext, float>,
ops::FusedGemmEpilogueKernel<paddle::platform::CUDADeviceContext, double>,
ops::FusedGemmEpilogueKernel<paddle::platform::CUDADeviceContext,
paddle::platform::float16>);
REGISTER_OP_CUDA_KERNEL(
fused_gemm_epilogue_grad,
ops::FusedGemmEpilogueGradKernel<paddle::platform::CUDADeviceContext,
float>,
ops::FusedGemmEpilogueGradKernel<paddle::platform::CUDADeviceContext,
double>,
ops::FusedGemmEpilogueGradKernel<paddle::platform::CUDADeviceContext,
paddle::platform::float16>);
#endif
|
53cfe5ede573fda0b41edb1935d8f91312aef19f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@precisions normal z -> c d s
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_z
// These routines merge multiple kernels from bicgstab into one.
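// Each launch below uses one thread per matrix row (BLOCK_SIZE threads per
// block, magma_ceildiv(num_rows, BLOCK_SIZE) blocks); every thread loops
// over the num_cols right-hand sides.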
/* -------------------------------------------------------------------------- */
__global__ void
magma_zbicgstab_1_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex beta,
magmaDoubleComplex omega,
magmaDoubleComplex *r,
magmaDoubleComplex *v,
magmaDoubleComplex *p )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
p[ i+j*num_rows ] = r[ i+j*num_rows ] +
beta * ( p[ i+j*num_rows ] - omega * v[ i+j*num_rows ] );
}
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
    p = r + beta * ( p - omega * v )
    Arguments
    ---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
beta magmaDoubleComplex
scalar
@param[in]
omega magmaDoubleComplex
scalar
@param[in]
r magmaDoubleComplex_ptr
vector
@param[in]
v magmaDoubleComplex_ptr
vector
@param[in,out]
p magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_zbicgstab_1(
magma_int_t num_rows,
magma_int_t num_cols,
magmaDoubleComplex beta,
magmaDoubleComplex omega,
magmaDoubleComplex_ptr r,
magmaDoubleComplex_ptr v,
magmaDoubleComplex_ptr p,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_zbicgstab_1_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, beta, omega,
r, v, p );
return MAGMA_SUCCESS;
}
__global__ void
magma_zbicgstab_2_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex alpha,
magmaDoubleComplex_ptr r,
magmaDoubleComplex_ptr v,
magmaDoubleComplex_ptr s )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
s[ i+j*num_rows ] = r[ i+j*num_rows ] - alpha * v[ i+j*num_rows ];
}
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
s = r - alpha v
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
alpha magmaDoubleComplex
scalar
@param[in]
r magmaDoubleComplex_ptr
vector
@param[in]
v magmaDoubleComplex_ptr
vector
@param[in,out]
s magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_zbicgstab_2(
magma_int_t num_rows,
magma_int_t num_cols,
magmaDoubleComplex alpha,
magmaDoubleComplex_ptr r,
magmaDoubleComplex_ptr v,
magmaDoubleComplex_ptr s,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_zbicgstab_2_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, alpha, r, v, s );
return MAGMA_SUCCESS;
}
__global__ void
magma_zbicgstab_3_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex alpha,
magmaDoubleComplex omega,
magmaDoubleComplex *p,
magmaDoubleComplex *s,
magmaDoubleComplex *t,
magmaDoubleComplex *x,
magmaDoubleComplex *r )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
magmaDoubleComplex tmp = s[ i+j*num_rows ];
x[ i+j*num_rows ] = x[ i+j*num_rows ]
+ alpha * p[ i+j*num_rows ] + omega * tmp;
r[ i+j*num_rows ] = tmp - omega * t[ i+j*num_rows ];
}
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
x = x + alpha * p + omega * s
r = s - omega * t
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
alpha magmaDoubleComplex
scalar
@param[in]
omega magmaDoubleComplex
scalar
@param[in]
p magmaDoubleComplex_ptr
vector
@param[in]
s magmaDoubleComplex_ptr
vector
@param[in]
t magmaDoubleComplex_ptr
vector
@param[in,out]
x magmaDoubleComplex_ptr
vector
@param[in,out]
r magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_zbicgstab_3(
magma_int_t num_rows,
magma_int_t num_cols,
magmaDoubleComplex alpha,
magmaDoubleComplex omega,
magmaDoubleComplex_ptr p,
magmaDoubleComplex_ptr s,
magmaDoubleComplex_ptr t,
magmaDoubleComplex_ptr x,
magmaDoubleComplex_ptr r,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_zbicgstab_3_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, alpha, omega, p, s, t, x, r );
return MAGMA_SUCCESS;
}
__global__ void
magma_zbicgstab_4_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex alpha,
magmaDoubleComplex omega,
magmaDoubleComplex *y,
magmaDoubleComplex *z,
magmaDoubleComplex *s,
magmaDoubleComplex *t,
magmaDoubleComplex *x,
magmaDoubleComplex *r )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
x[ i+j*num_rows ] = x[ i+j*num_rows ]
+ alpha * y[ i+j*num_rows ] + omega * z[ i+j*num_rows ];
r[ i+j*num_rows ] = s[ i+j*num_rows ] - omega * t[ i+j*num_rows ];
}
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
x = x + alpha * y + omega * z
r = s - omega * t
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
alpha magmaDoubleComplex
scalar
@param[in]
omega magmaDoubleComplex
scalar
@param[in]
y magmaDoubleComplex_ptr
vector
@param[in]
z magmaDoubleComplex_ptr
vector
@param[in]
s magmaDoubleComplex_ptr
vector
@param[in]
t magmaDoubleComplex_ptr
vector
@param[in,out]
x magmaDoubleComplex_ptr
vector
@param[in,out]
r magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_zbicgstab_4(
magma_int_t num_rows,
magma_int_t num_cols,
magmaDoubleComplex alpha,
magmaDoubleComplex omega,
magmaDoubleComplex_ptr y,
magmaDoubleComplex_ptr z,
magmaDoubleComplex_ptr s,
magmaDoubleComplex_ptr t,
magmaDoubleComplex_ptr x,
magmaDoubleComplex_ptr r,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_zbicgstab_4_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, alpha, omega, y, z, s, t, x, r );
return MAGMA_SUCCESS;
}
|
53cfe5ede573fda0b41edb1935d8f91312aef19f.cu
|
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@precisions normal z -> c d s
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_z
// These routines merge multiple kernels from bicgstab into one.
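// Each launch below uses one thread per matrix row (BLOCK_SIZE threads per
// block, magma_ceildiv(num_rows, BLOCK_SIZE) blocks); every thread loops
// over the num_cols right-hand sides.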
/* -------------------------------------------------------------------------- */
__global__ void
magma_zbicgstab_1_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex beta,
magmaDoubleComplex omega,
magmaDoubleComplex *r,
magmaDoubleComplex *v,
magmaDoubleComplex *p )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
p[ i+j*num_rows ] = r[ i+j*num_rows ] +
beta * ( p[ i+j*num_rows ] - omega * v[ i+j*num_rows ] );
}
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
    p = r + beta * ( p - omega * v )
    Arguments
    ---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
beta magmaDoubleComplex
scalar
@param[in]
omega magmaDoubleComplex
scalar
@param[in]
r magmaDoubleComplex_ptr
vector
@param[in]
v magmaDoubleComplex_ptr
vector
@param[in,out]
p magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_zbicgstab_1(
magma_int_t num_rows,
magma_int_t num_cols,
magmaDoubleComplex beta,
magmaDoubleComplex omega,
magmaDoubleComplex_ptr r,
magmaDoubleComplex_ptr v,
magmaDoubleComplex_ptr p,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
magma_zbicgstab_1_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, beta, omega,
r, v, p );
return MAGMA_SUCCESS;
}
__global__ void
magma_zbicgstab_2_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex alpha,
magmaDoubleComplex_ptr r,
magmaDoubleComplex_ptr v,
magmaDoubleComplex_ptr s )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
s[ i+j*num_rows ] = r[ i+j*num_rows ] - alpha * v[ i+j*num_rows ];
}
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
s = r - alpha v
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
alpha magmaDoubleComplex
scalar
@param[in]
r magmaDoubleComplex_ptr
vector
@param[in]
v magmaDoubleComplex_ptr
vector
@param[in,out]
s magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_zbicgstab_2(
magma_int_t num_rows,
magma_int_t num_cols,
magmaDoubleComplex alpha,
magmaDoubleComplex_ptr r,
magmaDoubleComplex_ptr v,
magmaDoubleComplex_ptr s,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
magma_zbicgstab_2_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, alpha, r, v, s );
return MAGMA_SUCCESS;
}
__global__ void
magma_zbicgstab_3_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex alpha,
magmaDoubleComplex omega,
magmaDoubleComplex *p,
magmaDoubleComplex *s,
magmaDoubleComplex *t,
magmaDoubleComplex *x,
magmaDoubleComplex *r )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
magmaDoubleComplex tmp = s[ i+j*num_rows ];
x[ i+j*num_rows ] = x[ i+j*num_rows ]
+ alpha * p[ i+j*num_rows ] + omega * tmp;
r[ i+j*num_rows ] = tmp - omega * t[ i+j*num_rows ];
}
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
x = x + alpha * p + omega * s
r = s - omega * t
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
alpha magmaDoubleComplex
scalar
@param[in]
omega magmaDoubleComplex
scalar
@param[in]
p magmaDoubleComplex_ptr
vector
@param[in]
s magmaDoubleComplex_ptr
vector
@param[in]
t magmaDoubleComplex_ptr
vector
@param[in,out]
x magmaDoubleComplex_ptr
vector
@param[in,out]
r magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_zbicgstab_3(
magma_int_t num_rows,
magma_int_t num_cols,
magmaDoubleComplex alpha,
magmaDoubleComplex omega,
magmaDoubleComplex_ptr p,
magmaDoubleComplex_ptr s,
magmaDoubleComplex_ptr t,
magmaDoubleComplex_ptr x,
magmaDoubleComplex_ptr r,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
magma_zbicgstab_3_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, alpha, omega, p, s, t, x, r );
return MAGMA_SUCCESS;
}
__global__ void
magma_zbicgstab_4_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex alpha,
magmaDoubleComplex omega,
magmaDoubleComplex *y,
magmaDoubleComplex *z,
magmaDoubleComplex *s,
magmaDoubleComplex *t,
magmaDoubleComplex *x,
magmaDoubleComplex *r )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
x[ i+j*num_rows ] = x[ i+j*num_rows ]
+ alpha * y[ i+j*num_rows ] + omega * z[ i+j*num_rows ];
r[ i+j*num_rows ] = s[ i+j*num_rows ] - omega * t[ i+j*num_rows ];
}
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
x = x + alpha * y + omega * z
r = s - omega * t
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
alpha magmaDoubleComplex
scalar
@param[in]
omega magmaDoubleComplex
scalar
@param[in]
y magmaDoubleComplex_ptr
vector
@param[in]
z magmaDoubleComplex_ptr
vector
@param[in]
s magmaDoubleComplex_ptr
vector
@param[in]
t magmaDoubleComplex_ptr
vector
@param[in,out]
x magmaDoubleComplex_ptr
vector
@param[in,out]
r magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_zbicgstab_4(
magma_int_t num_rows,
magma_int_t num_cols,
magmaDoubleComplex alpha,
magmaDoubleComplex omega,
magmaDoubleComplex_ptr y,
magmaDoubleComplex_ptr z,
magmaDoubleComplex_ptr s,
magmaDoubleComplex_ptr t,
magmaDoubleComplex_ptr x,
magmaDoubleComplex_ptr r,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
magma_zbicgstab_4_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, alpha, omega, y, z, s, t, x, r );
return MAGMA_SUCCESS;
}
|
c4a3421241fea8885049e62e7e55a349d1bfd348.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include<algorithm>
#include <math.h>
#define BLOCK_SIZE 1024
#define COUNT_NUMBER 101
using namespace std;
//INSERT CODE HERE---------------------------------
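// Counting sort, device side: each block histograms its chunk of g_A
// (values 0..100) into shared memory, turns the histogram into an inclusive
// prefix sum with a Kogge-Stone scan, and atomically adds the partial
// cumulative counts into the global array g_C.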
__global__ void counting(int* g_A, int* g_C,int counting_size ) {
    __shared__ int count_arr[2][COUNT_NUMBER];  // double buffer for the ping-pong scan
int tx=threadIdx.x;
int index=tx+blockIdx.x*blockDim.x;
if(tx<COUNT_NUMBER){
count_arr[0][tx]=0;
count_arr[1][tx]=0;
}
__syncthreads();
if(index<counting_size){
atomicAdd(&count_arr[1][g_A[index]],1);
}
int flag1=0;
int flag2=1;
int temp;
    // Kogge-Stone inclusive scan over the 101 counters. __syncthreads() must
    // be reached by every thread in the block, so the loop runs uniformly and
    // only the shared-memory update is guarded by the thread index.
    for (int stride = 1; stride <= 64; stride = stride * 2) {
        __syncthreads();
        temp = flag1;
        flag1 = flag2;
        flag2 = temp;
        if (tx < COUNT_NUMBER) {
            if (tx - stride >= 0) {
                count_arr[flag2][tx] = count_arr[flag1][tx] + count_arr[flag1][tx - stride];
            }
            else {
                count_arr[flag2][tx] = count_arr[flag1][tx];
            }
        }
    }
if(tx<COUNT_NUMBER){
atomicAdd(&(g_C[tx]),count_arr[flag2][tx]);
}
}
void verify(int* src, int*result, int input_size){
sort(src, src+input_size);
long long match_cnt=0;
for(int i=0; i<input_size;i++)
{
if(src[i]==result[i])
match_cnt++;
}
if(match_cnt==input_size)
printf("TEST PASSED\n\n");
else
printf("TEST FAILED\n\n");
}
void genData(int* ptr, unsigned int size) {
while (size--) {
*ptr++ = (int)(rand() % 101);
}
}
int main(int argc, char* argv[]) {
int* pSource = NULL;
int* pResult = NULL;
int input_size=0;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
if (argc == 2)
input_size=atoi(argv[1]);
else
{
printf("\n Invalid input parameters!"
"\n Usage: ./sort <input_size>"
"\n");
exit(0);
}
//allocate host memory
pSource=(int*)malloc(input_size*sizeof(int));
pResult=(int*)malloc(input_size*sizeof(int));
// generate source data
genData(pSource, input_size);
// start timer
hipEventRecord(start, 0);
//INSERT CODE HERE--------------------
int* pSdev=NULL;
int* pRdev=NULL;
int*pCount=NULL;
pCount=(int*)malloc(COUNT_NUMBER*sizeof(int));
hipMalloc((void**)&pSdev,input_size*sizeof(int));
hipMalloc((void**)&pRdev,COUNT_NUMBER*sizeof(int));
hipMemcpy(pSdev,pSource,input_size*sizeof(int),hipMemcpyHostToDevice);
dim3 dimBlock(BLOCK_SIZE,1,1);
hipLaunchKernelGGL(( counting), dim3(ceil(input_size/float(BLOCK_SIZE))),dim3(dimBlock), 0, 0, pSdev,pRdev,input_size);
hipMemcpy(pCount,pRdev,COUNT_NUMBER*sizeof(int),hipMemcpyDeviceToHost);
    // Handle the value 0 separately: positions [0, pCount[0]) hold zeros.
for(int i=0;i<=pCount[0]-1;i++){
pResult[i]=0;
}
    for (int k = 1; k < COUNT_NUMBER; k++) {  // pCount holds COUNT_NUMBER cumulative counts
for(int i=pCount[k]-1;i>pCount[k-1]-1;i--){
pResult[i]=k;
}
}
// end timer
float time;
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
printf("elapsed time = %f msec\n", time);
hipEventDestroy(start);
hipEventDestroy(stop);
printf("Verifying results..."); fflush(stdout);
verify(pSource, pResult, input_size);
fflush(stdout);
}
|
c4a3421241fea8885049e62e7e55a349d1bfd348.cu
|
#include <stdio.h>
#include <stdlib.h>
#include<algorithm>
#include <math.h>
#define BLOCK_SIZE 1024
#define COUNT_NUMBER 101
using namespace std;
//INSERT CODE HERE---------------------------------
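// Counting sort, device side: each block histograms its chunk of g_A
// (values 0..100) into shared memory, turns the histogram into an inclusive
// prefix sum with a Kogge-Stone scan, and atomically adds the partial
// cumulative counts into the global array g_C.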
__global__ void counting(int* g_A, int* g_C,int counting_size ) {
    __shared__ int count_arr[2][COUNT_NUMBER];  // double buffer for the ping-pong scan
int tx=threadIdx.x;
int index=tx+blockIdx.x*blockDim.x;
if(tx<COUNT_NUMBER){
count_arr[0][tx]=0;
count_arr[1][tx]=0;
}
__syncthreads();
if(index<counting_size){
atomicAdd(&count_arr[1][g_A[index]],1);
}
int flag1=0;
int flag2=1;
int temp;
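// Inclusive prefix sum (Hillis-Steele scan) over the 101 bucket counts.
// flag1/flag2 ping-pong between the two shared-memory rows so each step
// reads the previous step's results without a separate temporary array.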
for (int stride = 1; stride <= 64; stride = stride * 2) {
__syncthreads(); // barrier must be reached by every thread in the block, not only tx < COUNT_NUMBER
temp=flag1;
flag1=flag2;
flag2=temp;
if(tx<COUNT_NUMBER){
if(tx-stride>=0){
count_arr[flag2][tx]=count_arr[flag1][tx]+count_arr[flag1][tx-stride];
}
else{
count_arr[flag2][tx]=count_arr[flag1][tx];
}
}
}
if(tx<COUNT_NUMBER){
atomicAdd(&(g_C[tx]),count_arr[flag2][tx]);
}
}
void verify(int* src, int*result, int input_size){
sort(src, src+input_size);
long long match_cnt=0;
for(int i=0; i<input_size;i++)
{
if(src[i]==result[i])
match_cnt++;
}
if(match_cnt==input_size)
printf("TEST PASSED\n\n");
else
printf("TEST FAILED\n\n");
}
void genData(int* ptr, unsigned int size) {
while (size--) {
*ptr++ = (int)(rand() % 101);
}
}
int main(int argc, char* argv[]) {
int* pSource = NULL;
int* pResult = NULL;
int input_size=0;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
if (argc == 2)
input_size=atoi(argv[1]);
else
{
printf("\n Invalid input parameters!"
"\n Usage: ./sort <input_size>"
"\n");
exit(0);
}
//allocate host memory
pSource=(int*)malloc(input_size*sizeof(int));
pResult=(int*)malloc(input_size*sizeof(int));
// generate source data
genData(pSource, input_size);
// start timer
cudaEventRecord(start, 0);
//INSERT CODE HERE--------------------
int* pSdev=NULL;
int* pRdev=NULL;
int*pCount=NULL;
pCount=(int*)malloc(COUNT_NUMBER*sizeof(int));
cudaMalloc((void**)&pSdev,input_size*sizeof(int));
cudaMalloc((void**)&pRdev,COUNT_NUMBER*sizeof(int));
cudaMemset(pRdev,0,COUNT_NUMBER*sizeof(int)); // the kernel accumulates with atomicAdd, so the histogram must start zeroed
cudaMemcpy(pSdev,pSource,input_size*sizeof(int),cudaMemcpyHostToDevice);
dim3 dimBlock(BLOCK_SIZE,1,1);
counting<<<ceil(input_size/float(BLOCK_SIZE)),dimBlock>>>(pSdev,pRdev,input_size);
cudaMemcpy(pCount,pRdev,COUNT_NUMBER*sizeof(int),cudaMemcpyDeviceToHost);
// Handle the 0 case separately as well: positions [0, pCount[0]) hold the value 0.
for(int i=0;i<=pCount[0]-1;i++){
pResult[i]=0;
}
// Bucket k occupies positions [pCount[k-1], pCount[k]); values run 0..100.
for (int k=1; k<COUNT_NUMBER; k++){
for(int i=pCount[k]-1;i>pCount[k-1]-1;i--){
pResult[i]=k;
}
}
// end timer
float time;
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
printf("elapsed time = %f msec\n", time);
cudaEventDestroy(start);
cudaEventDestroy(stop);
printf("Verifying results..."); fflush(stdout);
verify(pSource, pResult, input_size);
fflush(stdout);
}
|
644be90c96c0cc184d9929f47335d5cf1615b3af.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
using namespace std;
__global__ void vecMat(int *a, int *b, int *c, int n) {
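// Each thread computes the dot product of one matrix row (selected by
// blockIdx.y) with the vector. Note the launch below uses an n-by-n grid,
// so the x dimension does redundant work; a (1, n) grid would suffice.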
int row = blockIdx.y * blockDim.y + threadIdx.y;
int sum = 0;
for (int j = 0; j < n; j++) {
sum += a[row * n + j] * b[j];
}
c[row] = sum;
}
int main() {
int n;
cin >> n;
int *a = new int[n * n];
int *b = new int[n];
int *c = new int[n];
int size = n * sizeof(int);
cout<<"Matrix A: "<<endl;
for (int i = 0; i < n; i++) {
for(int j = 0; j < n; j++) {
cin >> a[i * n + j];
}
}
cout<<"Matrix A is: "<<endl;
for(int i = 0; i < n; i++) {
for(int j = 0; j < n; j++) {
cout << "a[" << i * n + j << "] = " << a[i * n + j] << " ";
}
cout << endl;
}
cout<<"Vector B: "<<endl;
for(int i = 0; i < n; i++) {
cin >> b[i];
}
cout<<"Vector B is: "<<endl;
for(int i = 0; i < n; i++) {
cout << "b[" << i << "] = " <<b[i] << " ";
}
cout<<endl;
int *dev_a, *dev_b, *dev_c;
hipMalloc(&dev_a, n * size);
hipMalloc(&dev_b, size);
hipMalloc(&dev_c, size);
hipMemcpy(dev_a, a, n * size, hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, size, hipMemcpyHostToDevice);
dim3 grid_dim(n, n, 1);
hipLaunchKernelGGL(( vecMat) , dim3(grid_dim), dim3(1) , 0, 0, dev_a, dev_b, dev_c, n);
hipMemcpy(c, dev_c, size, hipMemcpyDeviceToHost);
cout << "Output: " << endl;
for(int i = 0; i < n; i++) {
cout<< "c[" << i << "] = " << c[i] <<" ";
}
}
|
644be90c96c0cc184d9929f47335d5cf1615b3af.cu
|
#include<iostream>
using namespace std;
__global__ void vecMat(int *a, int *b, int *c, int n) {
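// Each thread computes the dot product of one matrix row (selected by
// blockIdx.y) with the vector. Note the launch below uses an n-by-n grid,
// so the x dimension does redundant work; a (1, n) grid would suffice.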
int row = blockIdx.y * blockDim.y + threadIdx.y;
int sum = 0;
for (int j = 0; j < n; j++) {
sum += a[row * n + j] * b[j];
}
c[row] = sum;
}
int main() {
int n;
cin >> n;
int *a = new int[n * n];
int *b = new int[n];
int *c = new int[n];
int size = n * sizeof(int);
cout<<"Matrix A: "<<endl;
for (int i = 0; i < n; i++) {
for(int j = 0; j < n; j++) {
cin >> a[i * n + j];
}
}
cout<<"Matrix A is: "<<endl;
for(int i = 0; i < n; i++) {
for(int j = 0; j < n; j++) {
cout << "a[" << i * n + j << "] = " << a[i * n + j] << " ";
}
cout << endl;
}
cout<<"Vector B: "<<endl;
for(int i = 0; i < n; i++) {
cin >> b[i];
}
cout<<"Vector B is: "<<endl;
for(int i = 0; i < n; i++) {
cout << "b[" << i << "] = " <<b[i] << " ";
}
cout<<endl;
int *dev_a, *dev_b, *dev_c;
cudaMalloc(&dev_a, n * size);
cudaMalloc(&dev_b, size);
cudaMalloc(&dev_c, size);
cudaMemcpy(dev_a, a, n * size, cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, size, cudaMemcpyHostToDevice);
dim3 grid_dim(n, n, 1);
vecMat <<< grid_dim, 1 >>> (dev_a, dev_b, dev_c, n);
cudaMemcpy(c, dev_c, size, cudaMemcpyDeviceToHost);
cout << "Output: " << endl;
for(int i = 0; i < n; i++) {
cout<< "c[" << i << "] = " << c[i] <<" ";
}
}
|
e436d0b97d3a710da3462d878395eff71770f396.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Test.cuh"
#define N 1
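// NOTE: despite its name, this kernel simply multiplies each element by 10.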
__global__ void bitreverse(unsigned int *data)
{
unsigned int *idata = data;
idata[threadIdx.x] = idata[threadIdx.x] * 10;
}
extern "C" float DoSomethingInCuda(float v)
{
unsigned int *d = NULL; int i;
unsigned int idata[N], odata[N];
for (i = 0; i < N; i++)
idata[i] = 1;
hipMalloc((void**)&d, sizeof(int)*N);
hipMemcpy(d, idata, sizeof(int)*N,
hipMemcpyHostToDevice);
hipLaunchKernelGGL(( bitreverse), dim3(1), dim3(N), 0, 0, d);
hipMemcpy(odata, d, sizeof(int)*N,
hipMemcpyDeviceToHost);
v *= odata[0];
hipFree((void*)d);
return v;
}
|
e436d0b97d3a710da3462d878395eff71770f396.cu
|
#include "Test.cuh"
#define N 1
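// NOTE: despite its name, this kernel simply multiplies each element by 10.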
__global__ void bitreverse(unsigned int *data)
{
unsigned int *idata = data;
idata[threadIdx.x] = idata[threadIdx.x] * 10;
}
extern "C" float DoSomethingInCuda(float v)
{
unsigned int *d = NULL; int i;
unsigned int idata[N], odata[N];
for (i = 0; i < N; i++)
idata[i] = 1;
cudaMalloc((void**)&d, sizeof(int)*N);
cudaMemcpy(d, idata, sizeof(int)*N,
cudaMemcpyHostToDevice);
bitreverse<<<1, N>>>(d);
cudaMemcpy(odata, d, sizeof(int)*N,
cudaMemcpyDeviceToHost);
v *= odata[0];
cudaFree((void*)d);
return v;
}
|
66fee11e782437d075d00c9630486f8ca2b62cce.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <torch/extension.h>
#include <ATen/ATen.h> // ATen: the C++ tensor library under the PyTorch front end; it wraps raw arrays as tensor objects
#include <math.h>
#include <vector>
#define MAX(a,b) (a > b ? a : b)
#define MIN(a,b) (a < b ? a : b)
#define PI 3.1415926
// #define GRIDDIM 32
#define BLOCKDIM 1024 //32x*32y
texture<float> text_memory_image;
texture<float> text_memory_proj;
template <typename scalar_t> // scalar_t is the element type the tensor holds at run time
__device__ void TOF_dist_proj(
scalar_t *proj_value,
// const scalar_t *image,
const float tof_value,
const float x1l, const float y1l, const float x1r, const float y1r,
const float x2l, const float y2l, const float x2r, const float y2r,
const float time_resolution, const float dx, const float dy,
const int nx, const int ny, const int event_num)
{
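// TOF-weighted distance-driven projection along one line of response:
// march the image along the dominant axis; each pixel contributes its
// geometric overlap weight times a Gaussian time-of-flight weight centered
// at the annihilation position implied by tof_value.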
const float nx2 = nx/2;
const float ny2 = ny/2;
const float tof_sigma = time_resolution * 0.3 / 2.355 / 2;
const float tof_sigma_2 = tof_sigma * tof_sigma;
const float x1c = (x1l + x1r) / 2;
const float y1c = (y1l + y1r) / 2;
const float x2c = (x2l + x2r) / 2;
const float y2c = (y2l + y2r) / 2;
const float L = sqrtf((x1c - x2c) *(x1c - x2c) + (y1c - y2c) * (y1c - y2c));
const float ratio1 = (1 - tof_value / L) / 2;
float d2_tof, w_tof;
if (abs(x1c-x2c) > abs(y1c-y2c))
{
for (int ix=0; ix < nx; ix++)
{
float xc = (ix - nx2+0.5) * dx;
float tof_bin = dx;
if (tof_sigma > 0)
{
d2_tof = ((xc-x1c) / (x2c-x1c) - ratio1) * L;
if (d2_tof <= (3 * tof_sigma))
{
w_tof = expf(-0.5*d2_tof*d2_tof/tof_sigma_2)/sqrtf(2.0*PI*tof_sigma_2)*tof_bin;
}
else
{
w_tof = 0.0;
}
}
else
{
w_tof = 1.0;
}
//d1l-d2r
float kylr = (y1l-y2r)/(x1l-x2r);
float ylr = kylr*(xc-x1l)+y1l+ny2*dy;
//d1r-d2l
float kyrl = (y1r-y2l)/(x1r-x2l);
float yrl = kyrl*(xc-x1r)+y1r+ny2*dy;
float yy1 = MIN(ylr,yrl); // at x = xc, the smaller y of the two detector-edge intersections
float yy2 = MAX(ylr,yrl);
int cy1 = (int)floor(yy1/dy);
int cy2 = (int)floor(yy2/dy);
for (int iy = MAX(0, cy1); iy < MIN(ny, cy2+1); iy++)
{
float dist_w = (MIN((iy+1) * dy,yy2)-MAX(iy * dy, yy1)) / (yy2-yy1);
atomicAdd(proj_value, tex1Dfetch(text_memory_image, ix + nx * iy) * dist_w * w_tof);
}
}
}
else
{
//float kxlr, xlr, kxrl, xrl,
for (int iy=0; iy < ny; iy++)
{
float yc = (iy - ny2 + 0.5) *dy;
float tof_bin = dy;
if (tof_sigma > 0)
{
d2_tof = ((yc-y1c) / (y2c-y1c) - ratio1)*L;
if (d2_tof <=3 * tof_sigma)
{
w_tof = expf(-0.5 * d2_tof * d2_tof / tof_sigma_2) / sqrtf(2.0 * PI * tof_sigma_2) *tof_bin;
}
else
{
w_tof = 0.0;
}
}
else
{
w_tof = 1.0;
}
//d1l-d2r:
float kxlr = (x1l-x2r)/(y1l-y2r);
float xlr = kxlr*(yc-y1l)+x1l+nx2*dx;
//d1r-d2l:
float kxrl = (x1r-x2l)/(y1r-y2l);
float xrl = kxrl*(yc-y1r)+x1r+nx2*dx;
float xx1 = MIN(xlr,xrl);
float xx2 = MAX(xlr,xrl);
int cx1 = (int)floor(xx1/dx);
int cx2 = (int)floor(xx2/dx);
for (int ix= MAX(0, cx1); ix < MIN(nx, cx2+1); ix++)
{
float dist_w = (MIN((ix+1)*dx,xx2) - MAX(ix*dx,xx1))/(xx2-xx1);
atomicAdd(proj_value, tex1Dfetch(text_memory_image, ix + nx * iy) * dist_w * w_tof);
}
}
}
}
template <typename scalar_t>
__global__ void TOF_dist_proj_kernel(
scalar_t *proj_value,
// const scalar_t *image,
const scalar_t *tof_value,
const scalar_t *x1l, const scalar_t *y1l, const scalar_t *x1r, const scalar_t *y1r,
const scalar_t *x2l, const scalar_t *y2l, const scalar_t *x2r, const scalar_t *y2r,
const float time_resolution, const float dx, const float dy,
const int nx, const int ny, const int event_num)
{
int step = blockDim.x * gridDim.x;
for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < event_num; idx += step)
{
TOF_dist_proj(proj_value+idx, tof_value[idx], x1l[idx], y1l[idx], x1r[idx], y1r[idx],
x2l[idx], y2l[idx], x2r[idx], y2r[idx], time_resolution, dx, dy, nx, ny, event_num);
}
}
torch::Tensor TOF_dist_proj_cuda_batchs(
torch::Tensor image_batchs,
torch::Tensor tof_value_batchs,
torch::Tensor x1l_batchs, torch::Tensor y1l_batchs, torch::Tensor x1r_batchs, torch::Tensor y1r_batchs,
torch::Tensor x2l_batchs, torch::Tensor y2l_batchs, torch::Tensor x2r_batchs, torch::Tensor y2r_batchs,
float time_resolution, const float dx, const float dy,
const int nx, const int ny, const int event_num)
{
const int nb_batchs = image_batchs.size(0);
const int nb_channels = image_batchs.size(1);
torch::Tensor proj_batchs = torch::zeros({nb_batchs, nb_channels, event_num, 1}, image_batchs.type());
for (int ibatch = 0; ibatch < nb_batchs; ibatch++)
{
hipBindTexture(0, text_memory_image, image_batchs.data<float>()+ibatch*nx*ny,
nx*ny*sizeof(float)); // bind the image to read-only texture memory at offset 0
dim3 dimBlock = BLOCKDIM;
dim3 dimGrid = (event_num -1)/dimBlock.x + 1;
AT_DISPATCH_FLOATING_TYPES(
at::ScalarType::Float,
"TOF_dist_proj_cuda_batchs",
([&] {
hipLaunchKernelGGL(( TOF_dist_proj_kernel<scalar_t>), dim3(dimGrid), dim3(dimBlock), 0, 0,
proj_batchs.data<scalar_t>()+ibatch*event_num*1,
//image_batchs.data<scalar_t>(),
tof_value_batchs.data<scalar_t>()+ibatch*event_num*1,
x1l_batchs.data<scalar_t>()+ibatch*event_num*1,
y1l_batchs.data<scalar_t>()+ibatch*event_num*1,
x1r_batchs.data<scalar_t>()+ibatch*event_num*1,
y1r_batchs.data<scalar_t>()+ibatch*event_num*1,
x2l_batchs.data<scalar_t>()+ibatch*event_num*1,
y2l_batchs.data<scalar_t>()+ibatch*event_num*1,
x2r_batchs.data<scalar_t>()+ibatch*event_num*1,
y2r_batchs.data<scalar_t>()+ibatch*event_num*1,
time_resolution,
dx,dy,nx,ny,event_num);
}));
hipDeviceSynchronize();
hipUnbindTexture(text_memory_image);
}
return proj_batchs;
}
template <typename scalar_t>
__device__ void TOF_dist_bp(
scalar_t *image_bp,
const float proj_value,
const float tof_value,
const float x1l, const float y1l, const float x1r, const float y1r,
const float x2l, const float y2l, const float x2r, const float y2r,
const float time_resolution, const float dx, const float dy,
const int nx, const int ny)
{
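// Adjoint of TOF_dist_proj: scatters proj_value back into the image using
// the same geometric overlap and Gaussian TOF weights (back-projection).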
const float nx2 = nx/2;
const float ny2 = ny/2;
const float tof_sigma = time_resolution * 0.3 / 2.355 / 2;
const float tof_sigma_2 = tof_sigma * tof_sigma;
const float x1c = (x1l + x1r) / 2;
const float y1c = (y1l + y1r) / 2;
const float x2c = (x2l + x2r) / 2;
const float y2c = (y2l + y2r) / 2;
const float L = sqrtf((x1c - x2c) * (x1c - x2c) + (y1c - y2c) * (y1c - y2c));
const float ratio1 = (1 - (tof_value / L)) / 2;
if (abs(x1c - x2c) > abs(y1c - y2c))
{
for (int ix = 0; ix < nx; ix++)
{
float xc = (ix - nx2 + 0.5) * dx;
float tof_bin = dx;
float d2_tof, w_tof;
if (tof_sigma > 0)
{
d2_tof = ((xc-x1c) / (x2c-x1c) - ratio1)*L;
if (d2_tof <= 3 * tof_sigma)
{
w_tof = expf(-0.5 * d2_tof * d2_tof / tof_sigma_2) / sqrtf(2.0 * PI * tof_sigma_2) * tof_bin;
}
else
{
w_tof = 0.0;
}
}
else
{
w_tof = 1.0;
}
//d1l-d2r
float kylr = (y1l-y2r)/(x1l-x2r);
float ylr = kylr * (xc - x1l) + y1l + ny2 * dy;
//d1r-d2l
float kyrl = (y1r - y2l) / (x1r - x2l);
float yrl = kyrl * (xc - x1r) + y1r + ny2 * dy;
float yy1 = MIN(ylr,yrl); // at x = xc, the smaller y of the two detector-edge intersections
float yy2 = MAX(ylr,yrl);
int cy1 = (int)floorf(yy1/dy);
int cy2 = (int)floorf(yy2/dy);
for (int iy=(int)MAX(0, cy1); iy < (int)MIN(ny, cy2+1); iy++)
{
float dist_w = (MIN((iy+1) * dy,yy2) - MAX(iy * dy,yy1)) / dy;
atomicAdd(image_bp + (ix + iy * nx), proj_value * dist_w * w_tof);
}
}
}
else
{
for (int iy=0; iy < ny; iy++)
{
float yc = (iy - ny2 + 0.5) * dy;
float tof_bin = dy;
float d2_tof, w_tof;
if (tof_sigma > 0)
{
d2_tof = (((yc-y1c) / (y2c-y1c)) - ratio1) * L;
if (d2_tof <= 3 * tof_sigma)
{
w_tof = expf(-0.5 * d2_tof * d2_tof / tof_sigma_2) / sqrtf(2.0 * PI * tof_sigma_2) * tof_bin;
}
else
{
w_tof = 0.0;
}
}
else
{
w_tof = 1.0;
}
//d1l-d2r:
float kxlr = (x1l-x2r)/(y1l-y2r);
float xlr = kxlr * (yc-y1l)+x1l+nx2 * dx;
//d1r-d2l:
float kxrl = (x1r-x2l)/(y1r-y2l);
float xrl = kxrl * (yc-y1r)+x1r+nx2 * dx;
float xx1 = MIN(xlr,xrl);
float xx2 = MAX(xlr,xrl);
int cx1 = (int)floorf(xx1/dx);
int cx2 = (int)floorf(xx2/dx);
for (int ix=(int)MAX(0, cx1); ix < (int)MIN(nx, cx2+1); ix++)
{
float dist_w = (MIN((ix+1) * dx,xx2) - MAX(ix * dx,xx1))/dx;
atomicAdd(image_bp + (ix + iy * nx), proj_value * dist_w * w_tof);
}
}
}
}
template <typename scalar_t>
__global__ void TOF_dist_bp_kernel(
scalar_t *image_bp,
// const scalar_t *proj_value,
const scalar_t *tof_value,
const scalar_t *x1l, const scalar_t *y1l, const scalar_t *x1r, const scalar_t *y1r,
const scalar_t *x2l, const scalar_t *y2l, const scalar_t *x2r, const scalar_t *y2r,
const float time_resolution, const float dx, const float dy,
const int nx, const int ny, const int event_num)
{
int step = blockDim.x * gridDim.x;
for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < event_num; idx += step)
{
TOF_dist_bp(image_bp, tex1Dfetch(text_memory_proj,idx), tof_value[idx],
x1l[idx], y1l[idx], x1r[idx], y1r[idx],
x2l[idx], y2l[idx], x2r[idx], y2r[idx],
time_resolution, dx, dy,
nx, ny);
}
}
torch::Tensor TOF_dist_bproj_cuda_batchs(
torch::Tensor proj_batchs,
torch::Tensor tof_value_batchs,
torch::Tensor x1l_batchs, torch::Tensor y1l_batchs, torch::Tensor x1r_batchs, torch::Tensor y1r_batchs,
torch::Tensor x2l_batchs, torch::Tensor y2l_batchs, torch::Tensor x2r_batchs, torch::Tensor y2r_batchs,
float time_resolution, const float dx, const float dy,
const int nx, const int ny, const int event_num)
{
const int nb_batchs = proj_batchs.size(0);
const int nb_channels = proj_batchs.size(1);
torch::Tensor back_image_batchs = torch::zeros({nb_batchs, nb_channels, nx, ny}, proj_batchs.type());
dim3 dimBlock = BLOCKDIM;
dim3 dimGrid = (nx * ny -1) / dimBlock.x +1;
for (int ibatch = 0; ibatch < nb_batchs; ibatch++)
{
hipBindTexture(0, text_memory_proj, proj_batchs.data<float>() + ibatch * event_num *1,
event_num * sizeof(float));
AT_DISPATCH_FLOATING_TYPES(
at::ScalarType::Float,
"TOF_dist_bproj_cuda_batchs",
([&]{
hipLaunchKernelGGL(( TOF_dist_bp_kernel<scalar_t>), dim3(dimGrid), dim3(dimBlock), 0, 0,
back_image_batchs.data<scalar_t>()+ibatch*nx*ny,
tof_value_batchs.data<scalar_t>()+ibatch*event_num*1, // per-event arrays are strided by event_num, not nx*ny
x1l_batchs.data<scalar_t>()+ibatch*event_num*1,
y1l_batchs.data<scalar_t>()+ibatch*event_num*1,
x1r_batchs.data<scalar_t>()+ibatch*event_num*1,
y1r_batchs.data<scalar_t>()+ibatch*event_num*1,
x2l_batchs.data<scalar_t>()+ibatch*event_num*1,
y2l_batchs.data<scalar_t>()+ibatch*event_num*1,
x2r_batchs.data<scalar_t>()+ibatch*event_num*1,
y2r_batchs.data<scalar_t>()+ibatch*event_num*1,
time_resolution,
dx,dy,nx,ny,event_num);
}));
hipDeviceSynchronize();
hipUnbindTexture(text_memory_proj);
}
return back_image_batchs;
}
// torch::Tensor bpf_batchs(
// torch::Tensor proj_batchs,
// torch::Tensor tof_value_batchs,
// torch::Tensor x1l_batchs, torch::Tensor y1l_batchs, torch::Tensor x1r_batchs, torch::Tensor y1r_batchs,
// torch::Tensor x2l_batchs, torch::Tensor y2l_batchs, torch::Tensor x2r_batchs, torch::Tensor y2r_batchs,
// float time_resolution, const float dx, const float dy,
// const int nx, const int ny, const int event_num)
// {
// back_image_batchs = TOF_dist_bproj_cuda_batchs(proj_batchs, tof_value_batchs,
// x1l_batchs, y1l_batchs, x1r_batchs,y1r_batchs,
// x1l_batchs, y1l_batchs, x1r_batchs,y1r_batchs,
// time_resolution,
// dx,dy,nx,ny,event_num)
// }
|
66fee11e782437d075d00c9630486f8ca2b62cce.cu
|
#include <cuda.h>
#include <cuda_runtime.h>
#include <torch/extension.h>
#include <ATen/ATen.h> // ATen: the C++ tensor library under the PyTorch front end; it wraps raw arrays as tensor objects
#include <math.h>
#include <vector>
#define MAX(a,b) (a > b ? a : b)
#define MIN(a,b) (a < b ? a : b)
#define PI 3.1415926
// #define GRIDDIM 32
#define BLOCKDIM 1024 //32x*32y
texture<float> text_memory_image;
texture<float> text_memory_proj;
template <typename scalar_t> // scalar_t is the element type the tensor holds at run time
__device__ void TOF_dist_proj(
scalar_t *proj_value,
// const scalar_t *image,
const float tof_value,
const float x1l, const float y1l, const float x1r, const float y1r,
const float x2l, const float y2l, const float x2r, const float y2r,
const float time_resolution, const float dx, const float dy,
const int nx, const int ny, const int event_num)
{
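// TOF-weighted distance-driven projection along one line of response:
// march the image along the dominant axis; each pixel contributes its
// geometric overlap weight times a Gaussian time-of-flight weight centered
// at the annihilation position implied by tof_value.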
const float nx2 = nx/2;
const float ny2 = ny/2;
const float tof_sigma = time_resolution * 0.3 / 2.355 / 2;
const float tof_sigma_2 = tof_sigma * tof_sigma;
const float x1c = (x1l + x1r) / 2;
const float y1c = (y1l + y1r) / 2;
const float x2c = (x2l + x2r) / 2;
const float y2c = (y2l + y2r) / 2;
const float L = sqrtf((x1c - x2c) *(x1c - x2c) + (y1c - y2c) * (y1c - y2c));
const float ratio1 = (1 - tof_value / L) / 2;
float d2_tof, w_tof;
if (abs(x1c-x2c) > abs(y1c-y2c))
{
for (int ix=0; ix < nx; ix++)
{
float xc = (ix - nx2+0.5) * dx;
float tof_bin = dx;
if (tof_sigma > 0)
{
d2_tof = ((xc-x1c) / (x2c-x1c) - ratio1) * L;
if (d2_tof <= (3 * tof_sigma))
{
w_tof = expf(-0.5*d2_tof*d2_tof/tof_sigma_2)/sqrtf(2.0*PI*tof_sigma_2)*tof_bin;
}
else
{
w_tof = 0.0;
}
}
else
{
w_tof = 1.0;
}
//d1l-d2r
float kylr = (y1l-y2r)/(x1l-x2r);
float ylr = kylr*(xc-x1l)+y1l+ny2*dy;
//d1r-d2l
float kyrl = (y1r-y2l)/(x1r-x2l);
float yrl = kyrl*(xc-x1r)+y1r+ny2*dy;
float yy1 = MIN(ylr,yrl); // at x = xc, the smaller y of the two detector-edge intersections
float yy2 = MAX(ylr,yrl);
int cy1 = (int)floor(yy1/dy);
int cy2 = (int)floor(yy2/dy);
for (int iy = MAX(0, cy1); iy < MIN(ny, cy2+1); iy++)
{
float dist_w = (MIN((iy+1) * dy,yy2)-MAX(iy * dy, yy1)) / (yy2-yy1);
atomicAdd(proj_value, tex1Dfetch(text_memory_image, ix + nx * iy) * dist_w * w_tof);
}
}
}
else
{
//float kxlr, xlr, kxrl, xrl,
for (int iy=0; iy < ny; iy++)
{
float yc = (iy - ny2 + 0.5) *dy;
float tof_bin = dy;
if (tof_sigma > 0)
{
d2_tof = ((yc-y1c) / (y2c-y1c) - ratio1)*L;
if (d2_tof <=3 * tof_sigma)
{
w_tof = expf(-0.5 * d2_tof * d2_tof / tof_sigma_2) / sqrtf(2.0 * PI * tof_sigma_2) *tof_bin;
}
else
{
w_tof = 0.0;
}
}
else
{
w_tof = 1.0;
}
//d1l-d2r:
float kxlr = (x1l-x2r)/(y1l-y2r);
float xlr = kxlr*(yc-y1l)+x1l+nx2*dx;
//d1r-d2l:
float kxrl = (x1r-x2l)/(y1r-y2l);
float xrl = kxrl*(yc-y1r)+x1r+nx2*dx;
float xx1 = MIN(xlr,xrl);
float xx2 = MAX(xlr,xrl);
int cx1 = (int)floor(xx1/dx);
int cx2 = (int)floor(xx2/dx);
for (int ix= MAX(0, cx1); ix < MIN(nx, cx2+1); ix++)
{
float dist_w = (MIN((ix+1)*dx,xx2) - MAX(ix*dx,xx1))/(xx2-xx1);
atomicAdd(proj_value, tex1Dfetch(text_memory_image, ix + nx * iy) * dist_w * w_tof);
}
}
}
}
template <typename scalar_t>
__global__ void TOF_dist_proj_kernel(
scalar_t *proj_value,
// const scalar_t *image,
const scalar_t *tof_value,
const scalar_t *x1l, const scalar_t *y1l, const scalar_t *x1r, const scalar_t *y1r,
const scalar_t *x2l, const scalar_t *y2l, const scalar_t *x2r, const scalar_t *y2r,
const float time_resolution, const float dx, const float dy,
const int nx, const int ny, const int event_num)
{
int step = blockDim.x * gridDim.x;
for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < event_num; idx += step)
{
TOF_dist_proj(proj_value+idx, tof_value[idx], x1l[idx], y1l[idx], x1r[idx], y1r[idx],
x2l[idx], y2l[idx], x2r[idx], y2r[idx], time_resolution, dx, dy, nx, ny, event_num);
}
}
torch::Tensor TOF_dist_proj_cuda_batchs(
torch::Tensor image_batchs,
torch::Tensor tof_value_batchs,
torch::Tensor x1l_batchs, torch::Tensor y1l_batchs, torch::Tensor x1r_batchs, torch::Tensor y1r_batchs,
torch::Tensor x2l_batchs, torch::Tensor y2l_batchs, torch::Tensor x2r_batchs, torch::Tensor y2r_batchs,
float time_resolution, const float dx, const float dy,
const int nx, const int ny, const int event_num)
{
const int nb_batchs = image_batchs.size(0);
const int nb_channels = image_batchs.size(1);
torch::Tensor proj_batchs = torch::zeros({nb_batchs, nb_channels, event_num, 1}, image_batchs.type());
for (int ibatch = 0; ibatch < nb_batchs; ibatch++)
{
cudaBindTexture(0, text_memory_image, image_batchs.data<float>()+ibatch*nx*ny,
nx*ny*sizeof(float)); // bind the image to read-only texture memory at offset 0
dim3 dimBlock = BLOCKDIM;
dim3 dimGrid = (event_num -1)/dimBlock.x + 1;
AT_DISPATCH_FLOATING_TYPES(
at::ScalarType::Float,
"TOF_dist_proj_cuda_batchs",
([&] {
TOF_dist_proj_kernel<scalar_t><<<dimGrid, dimBlock>>>(
proj_batchs.data<scalar_t>()+ibatch*event_num*1,
//image_batchs.data<scalar_t>(),
tof_value_batchs.data<scalar_t>()+ibatch*event_num*1,
x1l_batchs.data<scalar_t>()+ibatch*event_num*1,
y1l_batchs.data<scalar_t>()+ibatch*event_num*1,
x1r_batchs.data<scalar_t>()+ibatch*event_num*1,
y1r_batchs.data<scalar_t>()+ibatch*event_num*1,
x2l_batchs.data<scalar_t>()+ibatch*event_num*1,
y2l_batchs.data<scalar_t>()+ibatch*event_num*1,
x2r_batchs.data<scalar_t>()+ibatch*event_num*1,
y2r_batchs.data<scalar_t>()+ibatch*event_num*1,
time_resolution,
dx,dy,nx,ny,event_num);
}));
cudaDeviceSynchronize();
cudaUnbindTexture(text_memory_image);
}
return proj_batchs;
}
template <typename scalar_t>
__device__ void TOF_dist_bp(
scalar_t *image_bp,
const float proj_value,
const float tof_value,
const float x1l, const float y1l, const float x1r, const float y1r,
const float x2l, const float y2l, const float x2r, const float y2r,
const float time_resolution, const float dx, const float dy,
const int nx, const int ny)
{
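// Adjoint of TOF_dist_proj: scatters proj_value back into the image using
// the same geometric overlap and Gaussian TOF weights (back-projection).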
const float nx2 = nx/2;
const float ny2 = ny/2;
const float tof_sigma = time_resolution * 0.3 / 2.355 / 2;
const float tof_sigma_2 = tof_sigma * tof_sigma;
const float x1c = (x1l + x1r) / 2;
const float y1c = (y1l + y1r) / 2;
const float x2c = (x2l + x2r) / 2;
const float y2c = (y2l + y2r) / 2;
const float L = sqrtf((x1c - x2c) * (x1c - x2c) + (y1c - y2c) * (y1c - y2c));
const float ratio1 = (1 - (tof_value / L)) / 2;
if (abs(x1c - x2c) > abs(y1c - y2c))
{
for (int ix = 0; ix < nx; ix++)
{
float xc = (ix - nx2 + 0.5) * dx;
float tof_bin = dx;
float d2_tof, w_tof;
if (tof_sigma > 0)
{
d2_tof = ((xc-x1c) / (x2c-x1c) - ratio1)*L;
if (d2_tof <= 3 * tof_sigma)
{
w_tof = expf(-0.5 * d2_tof * d2_tof / tof_sigma_2) / sqrtf(2.0 * PI * tof_sigma_2) * tof_bin;
}
else
{
w_tof = 0.0;
}
}
else
{
w_tof = 1.0;
}
//d1l-d2r
float kylr = (y1l-y2r)/(x1l-x2r);
float ylr = kylr * (xc - x1l) + y1l + ny2 * dy;
//d1r-d2l
float kyrl = (y1r - y2l) / (x1r - x2l);
float yrl = kyrl * (xc - x1r) + y1r + ny2 * dy;
float yy1 = MIN(ylr,yrl); // at x = xc, the smaller y of the two detector-edge intersections
float yy2 = MAX(ylr,yrl);
int cy1 = (int)floorf(yy1/dy);
int cy2 = (int)floorf(yy2/dy);
for (int iy=(int)MAX(0, cy1); iy < (int)MIN(ny, cy2+1); iy++)
{
float dist_w = (MIN((iy+1) * dy,yy2) - MAX(iy * dy,yy1)) / dy;
atomicAdd(image_bp + (ix + iy * nx), proj_value * dist_w * w_tof);
}
}
}
else
{
for (int iy=0; iy < ny; iy++)
{
float yc = (iy - ny2 + 0.5) * dy;
float tof_bin = dy;
float d2_tof, w_tof;
if (tof_sigma > 0)
{
d2_tof = (((yc-y1c) / (y2c-y1c)) - ratio1) * L;
if (d2_tof <= 3 * tof_sigma)
{
w_tof = expf(-0.5 * d2_tof * d2_tof / tof_sigma_2) / sqrtf(2.0 * PI * tof_sigma_2) * tof_bin;
}
else
{
w_tof = 0.0;
}
}
else
{
w_tof = 1.0;
}
//d1l-d2r:
float kxlr = (x1l-x2r)/(y1l-y2r);
float xlr = kxlr * (yc-y1l)+x1l+nx2 * dx;
//d1r-d2l:
float kxrl = (x1r-x2l)/(y1r-y2l);
float xrl = kxrl * (yc-y1r)+x1r+nx2 * dx;
float xx1 = MIN(xlr,xrl);
float xx2 = MAX(xlr,xrl);
int cx1 = (int)floorf(xx1/dx);
int cx2 = (int)floorf(xx2/dx);
for (int ix=(int)MAX(0, cx1); ix < (int)MIN(nx, cx2+1); ix++)
{
float dist_w = (MIN((ix+1) * dx,xx2) - MAX(ix * dx,xx1))/dx;
atomicAdd(image_bp + (ix + iy * nx), proj_value * dist_w * w_tof);
}
}
}
}
template <typename scalar_t>
__global__ void TOF_dist_bp_kernel(
scalar_t *image_bp,
// const scalar_t *proj_value,
const scalar_t *tof_value,
const scalar_t *x1l, const scalar_t *y1l, const scalar_t *x1r, const scalar_t *y1r,
const scalar_t *x2l, const scalar_t *y2l, const scalar_t *x2r, const scalar_t *y2r,
const float time_resolution, const float dx, const float dy,
const int nx, const int ny, const int event_num)
{
int step = blockDim.x * gridDim.x;
for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < event_num; idx += step)
{
TOF_dist_bp(image_bp, tex1Dfetch(text_memory_proj,idx), tof_value[idx],
x1l[idx], y1l[idx], x1r[idx], y1r[idx],
x2l[idx], y2l[idx], x2r[idx], y2r[idx],
time_resolution, dx, dy,
nx, ny);
}
}
torch::Tensor TOF_dist_bproj_cuda_batchs(
torch::Tensor proj_batchs,
torch::Tensor tof_value_batchs,
torch::Tensor x1l_batchs, torch::Tensor y1l_batchs, torch::Tensor x1r_batchs, torch::Tensor y1r_batchs,
torch::Tensor x2l_batchs, torch::Tensor y2l_batchs, torch::Tensor x2r_batchs, torch::Tensor y2r_batchs,
float time_resolution, const float dx, const float dy,
const int nx, const int ny, const int event_num)
{
const int nb_batchs = proj_batchs.size(0);
const int nb_channels = proj_batchs.size(1);
torch::Tensor back_image_batchs = torch::zeros({nb_batchs, nb_channels, nx, ny}, proj_batchs.type());
dim3 dimBlock = BLOCKDIM;
dim3 dimGrid = (nx * ny -1) / dimBlock.x +1;
for (int ibatch = 0; ibatch < nb_batchs; ibatch++)
{
cudaBindTexture(0, text_memory_proj, proj_batchs.data<float>() + ibatch * event_num *1,
event_num * sizeof(float));
AT_DISPATCH_FLOATING_TYPES(
at::ScalarType::Float,
"TOF_dist_bproj_cuda_batchs",
([&]{
TOF_dist_bp_kernel<scalar_t><<<dimGrid, dimBlock>>>(
back_image_batchs.data<scalar_t>()+ibatch*nx*ny,
tof_value_batchs.data<scalar_t>()+ibatch*event_num*1, // per-event arrays are strided by event_num, not nx*ny
x1l_batchs.data<scalar_t>()+ibatch*event_num*1,
y1l_batchs.data<scalar_t>()+ibatch*event_num*1,
x1r_batchs.data<scalar_t>()+ibatch*event_num*1,
y1r_batchs.data<scalar_t>()+ibatch*event_num*1,
x2l_batchs.data<scalar_t>()+ibatch*event_num*1,
y2l_batchs.data<scalar_t>()+ibatch*event_num*1,
x2r_batchs.data<scalar_t>()+ibatch*event_num*1,
y2r_batchs.data<scalar_t>()+ibatch*event_num*1,
time_resolution,
dx,dy,nx,ny,event_num);
}));
cudaDeviceSynchronize();
cudaUnbindTexture(text_memory_proj);
}
return back_image_batchs;
}
// torch::Tensor bpf_batchs(
// torch::Tensor proj_batchs,
// torch::Tensor tof_value_batchs,
// torch::Tensor x1l_batchs, torch::Tensor y1l_batchs, torch::Tensor x1r_batchs, torch::Tensor y1r_batchs,
// torch::Tensor x2l_batchs, torch::Tensor y2l_batchs, torch::Tensor x2r_batchs, torch::Tensor y2r_batchs,
// float time_resolution, const float dx, const float dy,
// const int nx, const int ny, const int event_num)
// {
// back_image_batchs = TOF_dist_bproj_cuda_batchs(proj_batchs, tof_value_batchs,
// x1l_batchs, y1l_batchs, x1r_batchs,y1r_batchs,
// x1l_batchs, y1l_batchs, x1r_batchs,y1r_batchs,
// time_resolution,
// dx,dy,nx,ny,event_num)
// }
|
fe3935532631808618e8a38a050a107c5b39db29.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utils.h"
#include <string>
#include "loadSaveImage.h"
#include <thrust/extrema.h>
// chroma-LogLuminance Space
static float *d_x__;
static float *d_y__;
static float *d_logY__;
// memory for the cdf
static unsigned int *d_cdf__;
static const int numBins = 1024;
size_t numRows__;
size_t numCols__;
/* Copied from Mike's IPython notebook with some minor modifications
* Mainly double precision constants to floats and log10 -> log10f
* Also removed Luminance (Y) channel since it is never used eke*/
__global__ void rgb_to_xyY(float *d_r, float *d_g, float *d_b, float *d_x,
float *d_y, float *d_log_Y, float delta,
int num_pixels_y, int num_pixels_x) {
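// Convert linear RGB to CIE xyY: (x, y) are chromaticity coordinates and
// log10(delta + Y) is kept for the later histogram equalization of luminance.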
int ny = num_pixels_y;
int nx = num_pixels_x;
int2 image_index_2d = make_int2((blockIdx.x * blockDim.x) + threadIdx.x,
(blockIdx.y * blockDim.y) + threadIdx.y);
int image_index_1d = (nx * image_index_2d.y) + image_index_2d.x;
if (image_index_2d.x < nx && image_index_2d.y < ny) {
float r = d_r[image_index_1d];
float g = d_g[image_index_1d];
float b = d_b[image_index_1d];
float X = (r * 0.4124f) + (g * 0.3576f) + (b * 0.1805f);
float Y = (r * 0.2126f) + (g * 0.7152f) + (b * 0.0722f);
float Z = (r * 0.0193f) + (g * 0.1192f) + (b * 0.9505f);
float L = X + Y + Z;
float x = X / L;
float y = Y / L;
float log_Y = log10f(delta + Y);
d_x[image_index_1d] = x;
d_y[image_index_1d] = y;
d_log_Y[image_index_1d] = log_Y;
}
}
/* Copied from Mike's IPython notebook *
Modified just by having threads read the
normalization constant directly from device memory
instead of copying it back */
__global__ void normalize_cdf(unsigned int *d_input_cdf, float *d_output_cdf,
int n) {
const float normalization_constant = 1.f / d_input_cdf[n - 1];
int global_index_1d = (blockIdx.x * blockDim.x) + threadIdx.x;
if (global_index_1d < n) {
unsigned int input_value = d_input_cdf[global_index_1d];
float output_value = input_value * normalization_constant;
d_output_cdf[global_index_1d] = output_value;
}
}
/* Copied from Mike's IPython notebook *
Modified double constants -> float *
Perform tone mapping based upon new *
luminance scaling */
__global__ void tonemap(float *d_x, float *d_y, float *d_log_Y,
float *d_cdf_norm, float *d_r_new, float *d_g_new,
float *d_b_new, float min_log_Y, float max_log_Y,
float log_Y_range, int num_bins, int num_pixels_y,
int num_pixels_x) {
int ny = num_pixels_y;
int nx = num_pixels_x;
int2 image_index_2d = make_int2((blockIdx.x * blockDim.x) + threadIdx.x,
(blockIdx.y * blockDim.y) + threadIdx.y);
int image_index_1d = (nx * image_index_2d.y) + image_index_2d.x;
if (image_index_2d.x < nx && image_index_2d.y < ny) {
float x = d_x[image_index_1d];
float y = d_y[image_index_1d];
float log_Y = d_log_Y[image_index_1d];
int bin_index =
min(num_bins - 1, int((num_bins * (log_Y - min_log_Y)) / log_Y_range));
float Y_new = d_cdf_norm[bin_index];
float X_new = x * (Y_new / y);
float Z_new = (1 - x - y) * (Y_new / y);
float r_new = (X_new * 3.2406f) + (Y_new * -1.5372f) + (Z_new * -0.4986f);
float g_new = (X_new * -0.9689f) + (Y_new * 1.8758f) + (Z_new * 0.0415f);
float b_new = (X_new * 0.0557f) + (Y_new * -0.2040f) + (Z_new * 1.0570f);
d_r_new[image_index_1d] = r_new;
d_g_new[image_index_1d] = g_new;
d_b_new[image_index_1d] = b_new;
}
}
// return types are void since any internal error will be handled by quitting
// no point in returning error codes...
void preProcess(float **d_luminance, unsigned int **d_cdf, size_t *numRows,
size_t *numCols, unsigned int *numberOfBins,
const std::string &filename) {
// make sure the context initializes ok
checkCudaErrors(hipFree(0));
float *imgPtr; // we will become responsible for this pointer
loadImageHDR(filename, &imgPtr, &numRows__, &numCols__);
*numRows = numRows__;
*numCols = numCols__;
// first thing to do is split incoming BGR float data into separate channels
size_t numPixels = numRows__ * numCols__;
float *red = new float[numPixels];
float *green = new float[numPixels];
float *blue = new float[numPixels];
// Remember image is loaded BGR
for (size_t i = 0; i < numPixels; ++i) {
blue[i] = imgPtr[3 * i + 0];
green[i] = imgPtr[3 * i + 1];
red[i] = imgPtr[3 * i + 2];
}
delete[] imgPtr; // being good citizens, we release the resources
// allocated in loadImageHDR
float *d_red, *d_green, *d_blue; // RGB space
size_t channelSize = sizeof(float) * numPixels;
checkCudaErrors(hipMalloc(&d_red, channelSize));
checkCudaErrors(hipMalloc(&d_green, channelSize));
checkCudaErrors(hipMalloc(&d_blue, channelSize));
checkCudaErrors(hipMalloc(&d_x__, channelSize));
checkCudaErrors(hipMalloc(&d_y__, channelSize));
checkCudaErrors(hipMalloc(&d_logY__, channelSize));
checkCudaErrors(hipMemcpy(d_red, red, channelSize, hipMemcpyHostToDevice));
checkCudaErrors(
hipMemcpy(d_green, green, channelSize, hipMemcpyHostToDevice));
checkCudaErrors(
hipMemcpy(d_blue, blue, channelSize, hipMemcpyHostToDevice));
// convert from RGB space to chrominance/luminance space xyY
const dim3 blockSize(32, 16, 1);
const dim3 gridSize((numCols__ + blockSize.x - 1) / blockSize.x,
(numRows__ + blockSize.y - 1) / blockSize.y, 1);
hipLaunchKernelGGL(( rgb_to_xyY), dim3(gridSize), dim3(blockSize), 0, 0, d_red, d_green, d_blue, d_x__, d_y__,
d_logY__, .0001f, numRows__, numCols__);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
*d_luminance = d_logY__;
// allocate memory for the cdf of the histogram
*numberOfBins = numBins;
checkCudaErrors(hipMalloc(&d_cdf__, sizeof(unsigned int) * numBins));
checkCudaErrors(hipMemset(d_cdf__, 0, sizeof(unsigned int) * numBins));
*d_cdf = d_cdf__;
checkCudaErrors(hipFree(d_red));
checkCudaErrors(hipFree(d_green));
checkCudaErrors(hipFree(d_blue));
delete[] red;
delete[] green;
delete[] blue;
}
void postProcess(const std::string &output_file, size_t numRows, size_t numCols,
float min_log_Y, float max_log_Y) {
const int numPixels = numRows__ * numCols__;
const int numThreads = 192;
float *d_cdf_normalized;
checkCudaErrors(hipMalloc(&d_cdf_normalized, sizeof(float) * numBins));
// first normalize the cdf to a maximum value of 1
// this is how we compress the range of the luminance channel
hipLaunchKernelGGL(( normalize_cdf), dim3((numBins + numThreads - 1) / numThreads), dim3(numThreads), 0, 0,
d_cdf__, d_cdf_normalized, numBins);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
// allocate memory for the output RGB channels
float *h_red, *h_green, *h_blue;
float *d_red, *d_green, *d_blue;
h_red = new float[numPixels];
h_green = new float[numPixels];
h_blue = new float[numPixels];
checkCudaErrors(hipMalloc(&d_red, sizeof(float) * numPixels));
checkCudaErrors(hipMalloc(&d_green, sizeof(float) * numPixels));
checkCudaErrors(hipMalloc(&d_blue, sizeof(float) * numPixels));
float log_Y_range = max_log_Y - min_log_Y;
const dim3 blockSize(32, 16, 1);
const dim3 gridSize((numCols + blockSize.x - 1) / blockSize.x,
(numRows + blockSize.y - 1) / blockSize.y);
// next perform the actual tone-mapping
// we map each luminance value to its new value
// and then transform back to RGB space
hipLaunchKernelGGL(( tonemap), dim3(gridSize), dim3(blockSize), 0, 0, d_x__, d_y__, d_logY__, d_cdf_normalized,
d_red, d_green, d_blue, min_log_Y, max_log_Y,
log_Y_range, numBins, numRows, numCols);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipMemcpy(h_red, d_red, sizeof(float) * numPixels,
hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(h_green, d_green, sizeof(float) * numPixels,
hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(h_blue, d_blue, sizeof(float) * numPixels,
hipMemcpyDeviceToHost));
// recombine the image channels
float *imageHDR = new float[numPixels * 3];
for (int i = 0; i < numPixels; ++i) {
imageHDR[3 * i + 0] = h_blue[i];
imageHDR[3 * i + 1] = h_green[i];
imageHDR[3 * i + 2] = h_red[i];
}
saveImageHDR(imageHDR, numRows, numCols, output_file);
delete[] imageHDR;
delete[] h_red;
delete[] h_green;
delete[] h_blue;
// cleanup
checkCudaErrors(hipFree(d_cdf_normalized));
checkCudaErrors(hipFree(d_red));
checkCudaErrors(hipFree(d_green));
checkCudaErrors(hipFree(d_blue));
}
void cleanupGlobalMemory(void) {
checkCudaErrors(hipFree(d_x__));
checkCudaErrors(hipFree(d_y__));
checkCudaErrors(hipFree(d_logY__));
checkCudaErrors(hipFree(d_cdf__));
}
|
fe3935532631808618e8a38a050a107c5b39db29.cu
|
#include "utils.h"
#include <string>
#include "loadSaveImage.h"
#include <thrust/extrema.h>
// chroma-LogLuminance Space
static float *d_x__;
static float *d_y__;
static float *d_logY__;
// memory for the cdf
static unsigned int *d_cdf__;
static const int numBins = 1024;
size_t numRows__;
size_t numCols__;
/* Copied from Mike's IPython notebook with some minor modifications
* Mainly double precision constants to floats and log10 -> log10f
* Also removed Luminance (Y) channel since it is never used eke*/
__global__ void rgb_to_xyY(float *d_r, float *d_g, float *d_b, float *d_x,
float *d_y, float *d_log_Y, float delta,
int num_pixels_y, int num_pixels_x) {
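// Convert linear RGB to CIE xyY: (x, y) are chromaticity coordinates and
// log10(delta + Y) is kept for the later histogram equalization of luminance.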
int ny = num_pixels_y;
int nx = num_pixels_x;
int2 image_index_2d = make_int2((blockIdx.x * blockDim.x) + threadIdx.x,
(blockIdx.y * blockDim.y) + threadIdx.y);
int image_index_1d = (nx * image_index_2d.y) + image_index_2d.x;
if (image_index_2d.x < nx && image_index_2d.y < ny) {
float r = d_r[image_index_1d];
float g = d_g[image_index_1d];
float b = d_b[image_index_1d];
float X = (r * 0.4124f) + (g * 0.3576f) + (b * 0.1805f);
float Y = (r * 0.2126f) + (g * 0.7152f) + (b * 0.0722f);
float Z = (r * 0.0193f) + (g * 0.1192f) + (b * 0.9505f);
float L = X + Y + Z;
float x = X / L;
float y = Y / L;
float log_Y = log10f(delta + Y);
d_x[image_index_1d] = x;
d_y[image_index_1d] = y;
d_log_Y[image_index_1d] = log_Y;
}
}
/* Copied from Mike's IPython notebook *
Modified just by having threads read the
normalization constant directly from device memory
instead of copying it back */
__global__ void normalize_cdf(unsigned int *d_input_cdf, float *d_output_cdf,
int n) {
const float normalization_constant = 1.f / d_input_cdf[n - 1];
int global_index_1d = (blockIdx.x * blockDim.x) + threadIdx.x;
if (global_index_1d < n) {
unsigned int input_value = d_input_cdf[global_index_1d];
float output_value = input_value * normalization_constant;
d_output_cdf[global_index_1d] = output_value;
}
}
/* Copied from Mike's IPython notebook *
Modified double constants -> float *
Perform tone mapping based upon new *
luminance scaling */
__global__ void tonemap(float *d_x, float *d_y, float *d_log_Y,
float *d_cdf_norm, float *d_r_new, float *d_g_new,
float *d_b_new, float min_log_Y, float max_log_Y,
float log_Y_range, int num_bins, int num_pixels_y,
int num_pixels_x) {
int ny = num_pixels_y;
int nx = num_pixels_x;
int2 image_index_2d = make_int2((blockIdx.x * blockDim.x) + threadIdx.x,
(blockIdx.y * blockDim.y) + threadIdx.y);
int image_index_1d = (nx * image_index_2d.y) + image_index_2d.x;
if (image_index_2d.x < nx && image_index_2d.y < ny) {
float x = d_x[image_index_1d];
float y = d_y[image_index_1d];
float log_Y = d_log_Y[image_index_1d];
int bin_index =
min(num_bins - 1, int((num_bins * (log_Y - min_log_Y)) / log_Y_range));
float Y_new = d_cdf_norm[bin_index];
float X_new = x * (Y_new / y);
float Z_new = (1 - x - y) * (Y_new / y);
float r_new = (X_new * 3.2406f) + (Y_new * -1.5372f) + (Z_new * -0.4986f);
float g_new = (X_new * -0.9689f) + (Y_new * 1.8758f) + (Z_new * 0.0415f);
float b_new = (X_new * 0.0557f) + (Y_new * -0.2040f) + (Z_new * 1.0570f);
d_r_new[image_index_1d] = r_new;
d_g_new[image_index_1d] = g_new;
d_b_new[image_index_1d] = b_new;
}
}
// return types are void since any internal error will be handled by quitting
// no point in returning error codes...
void preProcess(float **d_luminance, unsigned int **d_cdf, size_t *numRows,
size_t *numCols, unsigned int *numberOfBins,
const std::string &filename) {
// make sure the context initializes ok
checkCudaErrors(cudaFree(0));
float *imgPtr; // we will become responsible for this pointer
loadImageHDR(filename, &imgPtr, &numRows__, &numCols__);
*numRows = numRows__;
*numCols = numCols__;
// first thing to do is split incoming BGR float data into separate channels
size_t numPixels = numRows__ * numCols__;
float *red = new float[numPixels];
float *green = new float[numPixels];
float *blue = new float[numPixels];
// Remember image is loaded BGR
for (size_t i = 0; i < numPixels; ++i) {
blue[i] = imgPtr[3 * i + 0];
green[i] = imgPtr[3 * i + 1];
red[i] = imgPtr[3 * i + 2];
}
delete[] imgPtr; // being good citizens, we release the resources
// allocated in loadImageHDR
float *d_red, *d_green, *d_blue; // RGB space
size_t channelSize = sizeof(float) * numPixels;
checkCudaErrors(cudaMalloc(&d_red, channelSize));
checkCudaErrors(cudaMalloc(&d_green, channelSize));
checkCudaErrors(cudaMalloc(&d_blue, channelSize));
checkCudaErrors(cudaMalloc(&d_x__, channelSize));
checkCudaErrors(cudaMalloc(&d_y__, channelSize));
checkCudaErrors(cudaMalloc(&d_logY__, channelSize));
checkCudaErrors(cudaMemcpy(d_red, red, channelSize, cudaMemcpyHostToDevice));
checkCudaErrors(
cudaMemcpy(d_green, green, channelSize, cudaMemcpyHostToDevice));
checkCudaErrors(
cudaMemcpy(d_blue, blue, channelSize, cudaMemcpyHostToDevice));
// convert from RGB space to chrominance/luminance space xyY
const dim3 blockSize(32, 16, 1);
const dim3 gridSize((numCols__ + blockSize.x - 1) / blockSize.x,
(numRows__ + blockSize.y - 1) / blockSize.y, 1);
rgb_to_xyY<<<gridSize, blockSize>>>(d_red, d_green, d_blue, d_x__, d_y__,
d_logY__, .0001f, numRows__, numCols__);
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
*d_luminance = d_logY__;
// allocate memory for the cdf of the histogram
*numberOfBins = numBins;
checkCudaErrors(cudaMalloc(&d_cdf__, sizeof(unsigned int) * numBins));
checkCudaErrors(cudaMemset(d_cdf__, 0, sizeof(unsigned int) * numBins));
*d_cdf = d_cdf__;
checkCudaErrors(cudaFree(d_red));
checkCudaErrors(cudaFree(d_green));
checkCudaErrors(cudaFree(d_blue));
delete[] red;
delete[] green;
delete[] blue;
}
void postProcess(const std::string &output_file, size_t numRows, size_t numCols,
float min_log_Y, float max_log_Y) {
const int numPixels = numRows__ * numCols__;
const int numThreads = 192;
float *d_cdf_normalized;
checkCudaErrors(cudaMalloc(&d_cdf_normalized, sizeof(float) * numBins));
// first normalize the cdf to a maximum value of 1
// this is how we compress the range of the luminance channel
normalize_cdf<<<(numBins + numThreads - 1) / numThreads, numThreads>>>(
d_cdf__, d_cdf_normalized, numBins);
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
// allocate memory for the output RGB channels
float *h_red, *h_green, *h_blue;
float *d_red, *d_green, *d_blue;
h_red = new float[numPixels];
h_green = new float[numPixels];
h_blue = new float[numPixels];
checkCudaErrors(cudaMalloc(&d_red, sizeof(float) * numPixels));
checkCudaErrors(cudaMalloc(&d_green, sizeof(float) * numPixels));
checkCudaErrors(cudaMalloc(&d_blue, sizeof(float) * numPixels));
float log_Y_range = max_log_Y - min_log_Y;
const dim3 blockSize(32, 16, 1);
const dim3 gridSize((numCols + blockSize.x - 1) / blockSize.x,
(numRows + blockSize.y - 1) / blockSize.y);
// next perform the actual tone-mapping
// we map each luminance value to its new value
// and then transform back to RGB space
tonemap<<<gridSize, blockSize>>>(d_x__, d_y__, d_logY__, d_cdf_normalized,
d_red, d_green, d_blue, min_log_Y, max_log_Y,
log_Y_range, numBins, numRows, numCols);
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaMemcpy(h_red, d_red, sizeof(float) * numPixels,
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(h_green, d_green, sizeof(float) * numPixels,
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(h_blue, d_blue, sizeof(float) * numPixels,
cudaMemcpyDeviceToHost));
// recombine the image channels
float *imageHDR = new float[numPixels * 3];
for (int i = 0; i < numPixels; ++i) {
imageHDR[3 * i + 0] = h_blue[i];
imageHDR[3 * i + 1] = h_green[i];
imageHDR[3 * i + 2] = h_red[i];
}
saveImageHDR(imageHDR, numRows, numCols, output_file);
delete[] imageHDR;
delete[] h_red;
delete[] h_green;
delete[] h_blue;
// cleanup
checkCudaErrors(cudaFree(d_cdf_normalized));
checkCudaErrors(cudaFree(d_red));
checkCudaErrors(cudaFree(d_green));
checkCudaErrors(cudaFree(d_blue));
}
void cleanupGlobalMemory(void) {
checkCudaErrors(cudaFree(d_x__));
checkCudaErrors(cudaFree(d_y__));
checkCudaErrors(cudaFree(d_logY__));
checkCudaErrors(cudaFree(d_cdf__));
}
|
c7f5ffb2bd6f5f144cf4c37087edd8473c9fee67.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* orig_J6M_v2.cu
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
__global__ void
jacobikernel( float* a, float* newa, float* lchange, int n, int m, int THR, float w0, float w1, float w2 )
{
int ti = threadIdx.x;
int tj = threadIdx.y;
int i = blockIdx.x * blockDim.x + ti + 1;
int j = blockIdx.y * blockDim.y + tj + 1;
int TPT = THR + 2;
int TMO = THR - 1;
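// Stage a (THR+2) x (THR+2) tile of 'a' in shared memory: every thread
// loads its own cell, and threads with ti < 2 or tj < 2 also fetch the
// halo columns/rows needed by the 9-point stencil below.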
__shared__ float mychange[23*23];
float mnewa, molda;
mychange[tj*TPT+ti] = a[(j-1)*m+i-1];
if( ti < 2 ) mychange[tj*TPT+ti+THR] = a[(j-1)*m+i+TMO];
if( tj < 2 ) mychange[(tj+THR)*TPT+ti] = a[(j+TMO)*m+i-1];
if( tj < 2 && ti < 2 ) mychange[(tj+THR)*TPT+ti+THR] = a[(j+TMO)*m+i+TMO];
__syncthreads();
molda = mychange[(tj+1)*TPT+(ti+1)];
mnewa = w0*molda +
w1 * (mychange[(tj+1)*TPT+(ti )] + mychange[(tj )*TPT+(ti+1)] +
mychange[(tj+1)*TPT+(ti+2)] + mychange[(tj+2)*TPT+(ti+1)]) +
w2 * (mychange[(tj )*TPT+(ti )] + mychange[(tj+2)*TPT+(ti )] +
mychange[(tj )*TPT+(ti+2)] + mychange[(tj+2)*TPT+(ti+2)]);
newa[j*m+i] = mnewa;
__syncthreads();
int ii = ti+blockDim.x*tj;
mychange[ii] = fabsf( mnewa - molda );
__syncthreads();
int nn = blockDim.x * blockDim.y;
while( (nn>>=1) > 0 ){
if( ii < nn )
mychange[ii] = fmaxf( mychange[ii], mychange[ii+nn] );
__syncthreads();
}
if( ii == 0 )
lchange[blockIdx.x + gridDim.x*blockIdx.y] = mychange[0];
}
__global__ void
reductionkernel( float* lchange, int n, int THR )
{
__shared__ float mychange[23*23];
float mych = 0.0f;
int ii = threadIdx.x, m;
if( ii < n ) mych = lchange[ii];
m = blockDim.x;
while( m <= n ){
if(ii+m < n)
mych = fmaxf( mych, lchange[ii+m] );
m += blockDim.x;
}
mychange[ii] = mych;
__syncthreads();
int nn = blockDim.x;
while( (nn>>=1) > 0 ){
if( ii < nn )
mychange[ii] = fmaxf(mychange[ii],mychange[ii+nn]);
__syncthreads();
}
if( ii == 0 )
lchange[0] = mychange[0];
}
static float sumtime;
void JacobiGPU( float* a, int n, int m, int numThr, float w0, float w1, float w2, float tol )
{
float change;
int iters;
size_t memsize;
int bx, by, gx, gy;
float *da, *dnewa, *lchange;
hipEvent_t e1, e2;
bx = numThr;
by = numThr;
gx = (n-2)/bx + ((n-2)%bx == 0?0:1);
gy = (m-2)/by + ((m-2)%by == 0?0:1);
printf("Number of threads = %i and %i.\nNumber of Grids = %i and %i.\n", bx, by, gx, gy);
sumtime = 0.0f;
memsize = sizeof(float) * n * m;
hipMalloc( &da, memsize );
hipMalloc( &dnewa, memsize );
hipMalloc( &lchange, gx * gy * sizeof(float) );
hipEventCreate( &e1 );
hipEventCreate( &e2 );
dim3 block( bx, by );
dim3 grid( gx, gy );
iters = 0;
hipMemcpy( da, a, memsize, hipMemcpyHostToDevice );
hipMemcpy( dnewa, a, memsize, hipMemcpyHostToDevice );
do{
float msec;
++iters;
hipEventRecord( e1 );
hipLaunchKernelGGL(( jacobikernel), dim3(grid), dim3(block) , 0, 0, da, dnewa, lchange, n, m, numThr, w0, w1, w2 );
hipLaunchKernelGGL(( reductionkernel), dim3(1), dim3(bx*by) , 0, 0, lchange, gx*gy, numThr );
hipEventRecord( e2 );
hipMemcpy( &change, lchange, sizeof(float), hipMemcpyDeviceToHost );
hipEventElapsedTime( &msec, e1, e2 );
sumtime += msec;
float *ta;
ta = da;
da = dnewa;
dnewa = ta;
}while( change > tol );
double time = sumtime/1000.0f;
double dNumOps = 14.0 * iters * n *m;
double gflops = dNumOps/time/1e9;
printf( "JacobiGPU converged in %d iterations to residual %f\n", iters, change );
printf( "JacobiGPU used %.5f seconds total\n", sumtime/1000.0f );
printf( "Size(Number of Operations) = %.0f Ops/sec \n", dNumOps );
printf( "Throughtput = %.4f GFlops/sec \n",gflops );
hipMemcpy( a, dnewa, memsize, hipMemcpyDeviceToHost );
hipFree( da );
hipFree( dnewa );
hipFree( lchange );
hipEventDestroy( e1 );
hipEventDestroy( e2 );
}
static void init( float* a, int n, int m )
{
int i, j;
memset( a, 0, sizeof(float) * n * m );
/* boundary conditions */
for( j = 0; j < n; ++j ){
a[j*m+n-1] = j;
}
for( i = 0; i < m; ++i ){
a[(n-1)*m+i] = i;
}
a[(n-1)*m+m-1] = m+n;
}
int main( int argc, char* argv[] )
{
int n, m;
float *a;
struct timeval tt1, tt2;
int ms;
float fms;
int numThr = 16; /* default thread-block edge; overridden by argv[3] */
if( argc <= 1 ){
fprintf( stderr, "%s sizen [sizem numThreads]\n", argv[0] );
return 1;
}
n = atoi( argv[1] );
if( n <= 0 ) n = 100;
m = n;
if( argc > 3 ){
m = atoi( argv[2] );
numThr = atoi( argv[3] );
if( m <= 0 ) m = 100;
}
printf( "Jacobi %d x %d\n", n, m );
a = (float*)malloc( sizeof(float) * n * m );
init( a, n, m );
gettimeofday( &tt1, NULL );
JacobiGPU( a, n, m, numThr, .2, .1, .1, .1 );
gettimeofday( &tt2, NULL );
ms = (tt2.tv_sec - tt1.tv_sec);
ms = ms * 1000000 + (tt2.tv_usec - tt1.tv_usec);
fms = (float)ms / 1000000.0f;
printf( "time(gpu ) = %f seconds\n", fms );
}
|
c7f5ffb2bd6f5f144cf4c37087edd8473c9fee67.cu
|
/*
* orig_J6M_v2.cu
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
__global__ void
jacobikernel( float* a, float* newa, float* lchange, int n, int m, int THR, float w0, float w1, float w2 )
{
int ti = threadIdx.x;
int tj = threadIdx.y;
int i = blockIdx.x * blockDim.x + ti + 1;
int j = blockIdx.y * blockDim.y + tj + 1;
int TPT = THR + 2;
int TMO = THR - 1;
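// Stage a (THR+2) x (THR+2) tile of 'a' in shared memory: every thread
// loads its own cell, and threads with ti < 2 or tj < 2 also fetch the
// halo columns/rows needed by the 9-point stencil below.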
__shared__ float mychange[23*23];
float mnewa, molda;
mychange[tj*TPT+ti] = a[(j-1)*m+i-1];
if( ti < 2 ) mychange[tj*TPT+ti+THR] = a[(j-1)*m+i+TMO];
if( tj < 2 ) mychange[(tj+THR)*TPT+ti] = a[(j+TMO)*m+i-1];
if( tj < 2 && ti < 2 ) mychange[(tj+THR)*TPT+ti+THR] = a[(j+TMO)*m+i+TMO];
__syncthreads();
molda = mychange[(tj+1)*TPT+(ti+1)];
mnewa = w0*molda +
w1 * (mychange[(tj+1)*TPT+(ti )] + mychange[(tj )*TPT+(ti+1)] +
mychange[(tj+1)*TPT+(ti+2)] + mychange[(tj+2)*TPT+(ti+1)]) +
w2 * (mychange[(tj )*TPT+(ti )] + mychange[(tj+2)*TPT+(ti )] +
mychange[(tj )*TPT+(ti+2)] + mychange[(tj+2)*TPT+(ti+2)]);
newa[j*m+i] = mnewa;
__syncthreads();
int ii = ti+blockDim.x*tj;
mychange[ii] = fabsf( mnewa - molda );
__syncthreads();
int nn = blockDim.x * blockDim.y;
while( (nn>>=1) > 0 ){
if( ii < nn )
mychange[ii] = fmaxf( mychange[ii], mychange[ii+nn] );
__syncthreads();
}
if( ii == 0 )
lchange[blockIdx.x + gridDim.x*blockIdx.y] = mychange[0];
}
__global__ void
reductionkernel( float* lchange, int n, int THR )
{
__shared__ float mychange[23*23];
float mych = 0.0f;
int ii = threadIdx.x, m;
if( ii < n ) mych = lchange[ii];
m = blockDim.x;
while( m <= n ){
if(ii+m < n)
mych = fmaxf( mych, lchange[ii+m] );
m += blockDim.x;
}
mychange[ii] = mych;
__syncthreads();
int nn = blockDim.x;
while( (nn>>=1) > 0 ){
if( ii < nn )
mychange[ii] = fmaxf(mychange[ii],mychange[ii+nn]);
__syncthreads();
}
if( ii == 0 )
lchange[0] = mychange[0];
}
static float sumtime;
void JacobiGPU( float* a, int n, int m, int numThr, float w0, float w1, float w2, float tol )
{
float change;
int iters;
size_t memsize;
int bx, by, gx, gy;
float *da, *dnewa, *lchange;
cudaEvent_t e1, e2;
bx = numThr;
by = numThr;
gx = (n-2)/bx + ((n-2)%bx == 0?0:1);
gy = (m-2)/by + ((m-2)%by == 0?0:1);
printf("Number of threads = %i and %i.\nNumber of Grids = %i and %i.\n", bx, by, gx, gy);
sumtime = 0.0f;
memsize = sizeof(float) * n * m;
cudaMalloc( &da, memsize );
cudaMalloc( &dnewa, memsize );
cudaMalloc( &lchange, gx * gy * sizeof(float) );
cudaEventCreate( &e1 );
cudaEventCreate( &e2 );
dim3 block( bx, by );
dim3 grid( gx, gy );
iters = 0;
cudaMemcpy( da, a, memsize, cudaMemcpyHostToDevice );
cudaMemcpy( dnewa, a, memsize, cudaMemcpyHostToDevice );
do{
float msec;
++iters;
cudaEventRecord( e1 );
jacobikernel<<< grid, block >>>( da, dnewa, lchange, n, m, numThr, w0, w1, w2 );
reductionkernel<<< 1, bx*by >>>( lchange, gx*gy, numThr );
cudaEventRecord( e2 );
cudaMemcpy( &change, lchange, sizeof(float), cudaMemcpyDeviceToHost );
cudaEventElapsedTime( &msec, e1, e2 );
sumtime += msec;
float *ta;
ta = da;
da = dnewa;
dnewa = ta;
}while( change > tol );
double time = sumtime/1000.0f;
double dNumOps = 14.0 * iters * n *m;
double gflops = dNumOps/time/1e9;
printf( "JacobiGPU converged in %d iterations to residual %f\n", iters, change );
printf( "JacobiGPU used %.5f seconds total\n", sumtime/1000.0f );
printf( "Size(Number of Operations) = %.0f Ops/sec \n", dNumOps );
printf( "Throughtput = %.4f GFlops/sec \n",gflops );
cudaMemcpy( a, dnewa, memsize, cudaMemcpyDeviceToHost );
cudaFree( da );
cudaFree( dnewa );
cudaFree( lchange );
cudaEventDestroy( e1 );
cudaEventDestroy( e2 );
}
static void init( float* a, int n, int m )
{
int i, j;
memset( a, 0, sizeof(float) * n * m );
/* boundary conditions */
for( j = 0; j < n; ++j ){
a[j*m+n-1] = j;
}
for( i = 0; i < m; ++i ){
a[(n-1)*m+i] = i;
}
a[(n-1)*m+m-1] = m+n;
}
int main( int argc, char* argv[] )
{
int n, m;
float *a;
struct timeval tt1, tt2;
int ms;
float fms;
int numThr = 16; /* default thread-block edge; overridden by argv[3] */
if( argc <= 1 ){
fprintf( stderr, "%s sizen [sizem numThreads]\n", argv[0] );
return 1;
}
n = atoi( argv[1] );
if( n <= 0 ) n = 100;
m = n;
if( argc > 3 ){
m = atoi( argv[2] );
numThr = atoi( argv[3] );
if( m <= 0 ) m = 100;
}
printf( "Jacobi %d x %d\n", n, m );
a = (float*)malloc( sizeof(float) * n * m );
init( a, n, m );
gettimeofday( &tt1, NULL );
JacobiGPU( a, n, m, numThr, .2, .1, .1, .1 );
gettimeofday( &tt2, NULL );
ms = (tt2.tv_sec - tt1.tv_sec);
ms = ms * 1000000 + (tt2.tv_usec - tt1.tv_usec);
fms = (float)ms / 1000000.0f;
printf( "time(gpu ) = %f seconds\n", fms );
}
|
a31c514456919e86cac778c42cee27d5c38fd42c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include "../common/book.h"
#include "../common/gpu_anim.h"
#define DIM 1024
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#endif
__global__ void kernel( uchar4 *ptr, int ticks ) {
// map from threadIdx/BlockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
// now calculate the value at that position
float fx = x - DIM/2;
float fy = y - DIM/2;
float d = sqrtf( fx * fx + fy * fy );
unsigned char grey = (unsigned char)(128.0f + 127.0f *
cos(d/10.0f - ticks/7.0f) /
(d/10.0f + 1.0f));
ptr[offset].x = grey;
ptr[offset].y = grey;
ptr[offset].z = grey;
ptr[offset].w = 255;
}
void generate_frame( uchar4 *pixels, void*, int ticks ) {
dim3 grids(DIM/16,DIM/16);
dim3 threads(16,16);
hipLaunchKernelGGL(( kernel), dim3(grids),dim3(threads), 0, 0, pixels, ticks );
}
int main( void ) {
GPUAnimBitmap bitmap( DIM, DIM, NULL );
bitmap.anim_and_exit(
(void (*)(uchar4*,void*,int))generate_frame, NULL );
}
|
a31c514456919e86cac778c42cee27d5c38fd42c.cu
|
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include "../common/book.h"
#include "../common/gpu_anim.h"
#define DIM 1024
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#endif
__global__ void kernel( uchar4 *ptr, int ticks ) {
// map from threadIdx/BlockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
// now calculate the value at that position
float fx = x - DIM/2;
float fy = y - DIM/2;
float d = sqrtf( fx * fx + fy * fy );
unsigned char grey = (unsigned char)(128.0f + 127.0f *
cos(d/10.0f - ticks/7.0f) /
(d/10.0f + 1.0f));
ptr[offset].x = grey;
ptr[offset].y = grey;
ptr[offset].z = grey;
ptr[offset].w = 255;
}
void generate_frame( uchar4 *pixels, void*, int ticks ) {
dim3 grids(DIM/16,DIM/16);
dim3 threads(16,16);
kernel<<<grids,threads>>>( pixels, ticks );
}
int main( void ) {
GPUAnimBitmap bitmap( DIM, DIM, NULL );
bitmap.anim_and_exit(
(void (*)(uchar4*,void*,int))generate_frame, NULL );
}
|
aaeefe207bfcb16fd448927c031dbb160e0f5274.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "book.h"
#define UF 8
__global__ void orcu_kernel6(int n, int orcu_var3, double a1, double* y, double* x1) {
int tid=UF*(blockIdx.x*blockDim.x+threadIdx.x)+orcu_var3;
if (tid<=n-UF) {
{
y[tid]=y[tid]+a1*x1[tid];
int index = tid+1;
y[index]=y[index]+a1*x1[index];
index = tid+2;
y[index]=y[index]+a1*x1[index];
index = tid+3;
y[index]=y[index]+a1*x1[index];
index = tid+4;
y[index]=y[index]+a1*x1[index];
index = tid+5;
y[index]=y[index]+a1*x1[index];
index = tid+6;
y[index]=y[index]+a1*x1[index];
index = tid+7;
y[index]=y[index]+a1*x1[index];
}
}
}
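// Each thread updates UF consecutive elements; the tid <= n-UF guard means any
// tail when n is not a multiple of UF is left to the cleanup kernel below,
// which is currently commented out -- so full coverage assumes n is a multiple
// of UF and the grid supplies n/UF threads.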
//__global__ void orcu_kernel11(int n, int orcu_var8, double a1, double* y, double* x1) {
//int tid=blockIdx.x*blockDim.x+threadIdx.x+orcu_var8;
//if (tid<=n-1) {
//y[tid]=y[tid]+a1*x1[tid];
//}
//}
void axpy1(int n, double *y, double a1, double *x1)
{
register int i;
/*@ begin Loop(
transform Composite(
cuda = (16,False, False, 1)
,scalarreplace = (False, 'int')
, unrolljam = (['i'], [2])
)
{
for (i=0; i<=n-1; i++) {
y[i]=y[i]+a1*x1[i];
}
}
) @*/
hipEvent_t start, stop;
HANDLE_ERROR(hipEventCreate(&start));
HANDLE_ERROR(hipEventCreate(&stop));
{
{
int orio_lbound1=0;
//{
/*declare variables*/
double *dev_y, *dev_x1;
int nthreads=TC;
/*calculate device dimensions*/
dim3 dimGrid, dimBlock;
dimBlock.x=nthreads;
dimGrid.x=(n+nthreads-1)/nthreads;
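/* each thread handles UF consecutive elements, so shrink the grid by UF (rounding up) */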
dimGrid.x=(dimGrid.x+UF-1)/UF;
printf("num of blocks: %d\n", dimGrid.x);
/*allocate device memory*/
int nbytes=n*sizeof(double);
hipMalloc((void**)&dev_y,nbytes);
hipMalloc((void**)&dev_x1,nbytes);
/*copy data from host to device*/
hipMemcpy(dev_y,y,nbytes,hipMemcpyHostToDevice);
hipMemcpy(dev_x1,x1,nbytes,hipMemcpyHostToDevice);
/*invoke device kernel*/
int orcu_var3=orio_lbound1;
HANDLE_ERROR(hipEventRecord(start, 0));
hipLaunchKernelGGL(( orcu_kernel6), dim3(dimGrid),dim3(dimBlock), 0, 0, n,orcu_var3,a1,dev_y,dev_x1);
HANDLE_ERROR(hipEventRecord(stop, 0));
/*copy data from device to host*/
hipMemcpy(y,dev_y,nbytes,hipMemcpyDeviceToHost);
/*free allocated memory*/
hipFree(dev_y);
hipFree(dev_x1);
//}
//int orio_lbound2=n-((n-(0))%2);
{
/*declare variables*/
//double *dev_y, *dev_x1;
//int nthreads=TC;
/*calculate device dimensions*/
//dim3 dimGrid, dimBlock;
//dimBlock.x=nthreads;
//dimGrid.x=(n+nthreads-1)/nthreads;
/*allocate device memory*/
//int nbytes=n*sizeof(double);
//hipMalloc((void**)&dev_y,nbytes);
//hipMalloc((void**)&dev_x1,nbytes);
/*copy data from host to device*/
//hipMemcpy(dev_y,y,nbytes,hipMemcpyHostToDevice);
//hipMemcpy(dev_x1,x1,nbytes,hipMemcpyHostToDevice);
/*invoke device kernel*/
//int orcu_var8=orio_lbound2;
//orcu_kernel11<<<dimGrid,dimBlock>>>(n,orcu_var8,a1,dev_y,dev_x1);
/*copy data from device to host*/
//hipMemcpy(y,dev_y,nbytes,hipMemcpyDeviceToHost);
/*free allocated memory*/
//hipFree(dev_y);
//hipFree(dev_x1);
}
}
}
/*@ end @*/
HANDLE_ERROR(hipEventSynchronize(stop));
float passedTime;
HANDLE_ERROR(hipEventElapsedTime(&passedTime, start, stop));
HANDLE_ERROR(hipEventDestroy(start));
HANDLE_ERROR(hipEventDestroy(stop));
printf("timePassed: %f ms\n", passedTime);
}
int main(){
double* y = (double*) malloc(sizeof(double)*NN);
double* x1 = (double*) malloc(sizeof(double)*NN);
double a1 = AA;
int i;
for(i=0; i<NN; i++){
y[i] = i;
x1[i] = i;
}
axpy1(NN, y, a1, x1);
for(i=0; i<13; i++)
printf("%f\n", y[i]);
for(i=NN-9; i<NN; i++)
printf("%f\n", y[i]);
free(y);
free(x1);
return 0;
}
|
aaeefe207bfcb16fd448927c031dbb160e0f5274.cu
|
#include "book.h"
#define UF 8
__global__ void orcu_kernel6(int n, int orcu_var3, double a1, double* y, double* x1) {
int tid=UF*(blockIdx.x*blockDim.x+threadIdx.x)+orcu_var3;
if (tid<=n-UF) {
{
y[tid]=y[tid]+a1*x1[tid];
int index = tid+1;
y[index]=y[index]+a1*x1[index];
index = tid+2;
y[index]=y[index]+a1*x1[index];
index = tid+3;
y[index]=y[index]+a1*x1[index];
index = tid+4;
y[index]=y[index]+a1*x1[index];
index = tid+5;
y[index]=y[index]+a1*x1[index];
index = tid+6;
y[index]=y[index]+a1*x1[index];
index = tid+7;
y[index]=y[index]+a1*x1[index];
}
}
}
//__global__ void orcu_kernel11(int n, int orcu_var8, double a1, double* y, double* x1) {
//int tid=blockIdx.x*blockDim.x+threadIdx.x+orcu_var8;
//if (tid<=n-1) {
//y[tid]=y[tid]+a1*x1[tid];
//}
//}
void axpy1(int n, double *y, double a1, double *x1)
{
register int i;
/*@ begin Loop(
transform Composite(
cuda = (16,False, False, 1)
,scalarreplace = (False, 'int')
, unrolljam = (['i'], [2])
)
{
for (i=0; i<=n-1; i++) {
y[i]=y[i]+a1*x1[i];
}
}
) @*/
cudaEvent_t start, stop;
HANDLE_ERROR(cudaEventCreate(&start));
HANDLE_ERROR(cudaEventCreate(&stop));
{
{
int orio_lbound1=0;
//{
/*declare variables*/
double *dev_y, *dev_x1;
int nthreads=TC;
/*calculate device dimensions*/
dim3 dimGrid, dimBlock;
dimBlock.x=nthreads;
dimGrid.x=(n+nthreads-1)/nthreads;
dimGrid.x=(dimGrid.x+UF-1)/UF;
printf("num of blocks: %d\n", dimGrid.x);
/*allocate device memory*/
int nbytes=n*sizeof(double);
cudaMalloc((void**)&dev_y,nbytes);
cudaMalloc((void**)&dev_x1,nbytes);
/*copy data from host to device*/
cudaMemcpy(dev_y,y,nbytes,cudaMemcpyHostToDevice);
cudaMemcpy(dev_x1,x1,nbytes,cudaMemcpyHostToDevice);
/*invoke device kernel*/
int orcu_var3=orio_lbound1;
HANDLE_ERROR(cudaEventRecord(start, 0));
orcu_kernel6<<<dimGrid,dimBlock>>>(n,orcu_var3,a1,dev_y,dev_x1);
HANDLE_ERROR(cudaEventRecord(stop, 0));
/*copy data from device to host*/
cudaMemcpy(y,dev_y,nbytes,cudaMemcpyDeviceToHost);
/*free allocated memory*/
cudaFree(dev_y);
cudaFree(dev_x1);
//}
//int orio_lbound2=n-((n-(0))%2);
{
/*declare variables*/
//double *dev_y, *dev_x1;
//int nthreads=TC;
/*calculate device dimensions*/
//dim3 dimGrid, dimBlock;
//dimBlock.x=nthreads;
//dimGrid.x=(n+nthreads-1)/nthreads;
/*allocate device memory*/
//int nbytes=n*sizeof(double);
//cudaMalloc((void**)&dev_y,nbytes);
//cudaMalloc((void**)&dev_x1,nbytes);
/*copy data from host to device*/
//cudaMemcpy(dev_y,y,nbytes,cudaMemcpyHostToDevice);
//cudaMemcpy(dev_x1,x1,nbytes,cudaMemcpyHostToDevice);
/*invoke device kernel*/
//int orcu_var8=orio_lbound2;
//orcu_kernel11<<<dimGrid,dimBlock>>>(n,orcu_var8,a1,dev_y,dev_x1);
/*copy data from device to host*/
//cudaMemcpy(y,dev_y,nbytes,cudaMemcpyDeviceToHost);
/*free allocated memory*/
//cudaFree(dev_y);
//cudaFree(dev_x1);
}
}
}
/*@ end @*/
HANDLE_ERROR(cudaEventSynchronize(stop));
float passedTime;
HANDLE_ERROR(cudaEventElapsedTime(&passedTime, start, stop));
HANDLE_ERROR(cudaEventDestroy(start));
HANDLE_ERROR(cudaEventDestroy(stop));
printf("timePassed: %f ms\n", passedTime);
}
int main(){
double* y = (double*) malloc(sizeof(double)*NN);
double* x1 = (double*) malloc(sizeof(double)*NN);
double a1 = AA;
int i;
for(i=0; i<NN; i++){
y[i] = i;
x1[i] = i;
}
axpy1(NN, y, a1, x1);
for(i=0; i<13; i++)
printf("%f\n", y[i]);
for(i=NN-9; i<NN; i++)
printf("%f\n", y[i]);
free(y);
free(x1);
return 0;
}
|
8cf11e0fda00e913614ac01c1ddf5d13d2afea5c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/fill.h>
#include <thrust/sequence.h>
#include "Timer.hpp"
#include <iostream>
#include <string>
#include <cmath>
#include <algorithm>
#include <numeric>
#include <random>
// Sequential norm for verification purposes
// Confirmed working!
double norm(const thrust::host_vector<float>& v) {
double sum = 0.0;
for (size_t i = 0; i < v.size(); ++i){
sum += v[i] * v[i];
}
return std::sqrt(sum);
}
template <typename VectorType, typename T>
void randomize(VectorType &x, T scale) {
std::default_random_engine generator;
std::uniform_real_distribution<double> distribution(-scale, scale);
static auto dice = std::bind(distribution, generator);
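// note: `static` binds copies of generator/distribution once, on the first
// call, so later calls reuse the same default-seeded stream and any different
// `scale` argument is ignored -- deterministic fills, fine for benchmarking.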
for (size_t i = 0; i < x.size(); ++i) {
x[i] = dice();
}
}
template <typename T>
struct square
{
// The __host__ __device__ is required to get rid of a MASSIVE template warning
// Per the github documentation
__host__ __device__ T operator()(const T& x)
{
return x * x;
}
};
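// thrust::transform_reduce applies the unary op element-wise and folds with the
// binary op in one fused pass, i.e. init + x0*x0 + x1*x1 + ...; the host-side
// sqrt of that sum gives the 2-norm, matching the sequential norm() above.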
int main(int argc, char* argv[]) {
size_t exponent = 27;
size_t num_trips = 1;
if (argc >= 2) exponent = std::stol(argv[1]);
if (argc >= 3) num_trips = std::stol(argv[2]);
size_t num_elements = size_t{1} << exponent; // shift in size_t so exponent >= 31 cannot overflow int
thrust::host_vector<float> x(num_elements);
randomize(x, 10.0f);
thrust::device_vector<float> device_x(num_elements);
thrust::copy(x.begin(), x.end(), device_x.begin());
float init = 0.0;
float result = 0.0;
square<float> unary_op;
thrust::plus<float> binary_op;
DEF_TIMER(gpu_norm);
START_TIMER(gpu_norm);
hipDeviceSynchronize();
for (size_t i = 0; i < num_trips; ++i) {
/* square-and-sum in one fused device pass, then sqrt of the scalar on the host */
result = thrust::transform_reduce(device_x.begin(), device_x.end(), unary_op, init, binary_op);
hipDeviceSynchronize();
result = std::sqrt(result);
}
double cuda_time = STOP_TIMER_QUIETLY(gpu_norm);
std::cout << exponent << "\t" << num_trips << "\t" << cuda_time << "\t" << result << std::endl;
return 0;
}
|
8cf11e0fda00e913614ac01c1ddf5d13d2afea5c.cu
|
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/fill.h>
#include <thrust/sequence.h>
#include "Timer.hpp"
#include <iostream>
#include <string>
#include <cmath>
#include <algorithm>
#include <numeric>
#include <random>
// Sequential norm for verification purposes
// Confirmed working!
double norm(const thrust::host_vector<float>& v) {
double sum = 0.0;
for (size_t i = 0; i < v.size(); ++i){
sum += v[i] * v[i];
}
return std::sqrt(sum);
}
template <typename VectorType, typename T>
void randomize(VectorType &x, T scale) {
std::default_random_engine generator;
std::uniform_real_distribution<double> distribution(-scale, scale);
static auto dice = std::bind(distribution, generator);
for (size_t i = 0; i < x.size(); ++i) {
x[i] = dice();
}
}
template <typename T>
struct square
{
// The __host__ __device__ is required to get rid of a MASSIVE template warning
// Per the github documentation
__host__ __device__ T operator()(const T& x)
{
return x * x;
}
};
int main(int argc, char* argv[]) {
size_t exponent = 27;
size_t num_trips = 1;
if (argc >= 2) exponent = std::stol(argv[1]);
if (argc >= 3) num_trips = std::stol(argv[2]);
size_t num_elements = size_t{1} << exponent; // shift in size_t so exponent >= 31 cannot overflow int
thrust::host_vector<float> x(num_elements);
randomize(x, 10.0f);
thrust::device_vector<float> device_x(num_elements);
thrust::copy(x.begin(), x.end(), device_x.begin());
float init = 0.0;
float result = 0.0;
square<float> unary_op;
thrust::plus<float> binary_op;
DEF_TIMER(gpu_norm);
START_TIMER(gpu_norm);
cudaDeviceSynchronize();
for (size_t i = 0; i < num_trips; ++i) {
/* square-and-sum in one fused device pass, then sqrt of the scalar on the host */
result = thrust::transform_reduce(device_x.begin(), device_x.end(), unary_op, init, binary_op);
cudaDeviceSynchronize();
result = std::sqrt(result);
}
double cuda_time = STOP_TIMER_QUIETLY(gpu_norm);
std::cout << exponent << "\t" << num_trips << "\t" << cuda_time << "\t" << result << std::endl;
return 0;
}
|
0eb47cd2bb9ea9eeff632512c5889f22cc13af24.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__constant__ float *c_Kernel;
__global__ void add(float *d_dst, float*d_src_1, int len) {
int baseX = blockIdx.x * blockDim.x + threadIdx.x;
if (baseX < len)
{
d_dst[baseX] = d_dst[baseX] + d_src_1[baseX];
}
}
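// A minimal host-side launch sketch (illustrative assumption -- this file ships
// only the kernel; threads=256 and the device pointers are placeholders):
//   int threads = 256;
//   int blocks = (len + threads - 1) / threads;
//   hipLaunchKernelGGL(add, dim3(blocks), dim3(threads), 0, 0, d_dst, d_src_1, len);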
|
0eb47cd2bb9ea9eeff632512c5889f22cc13af24.cu
|
#include "includes.h"
__constant__ float *c_Kernel;
__global__ void add(float *d_dst, float*d_src_1, int len) {
int baseX = blockIdx.x * blockDim.x + threadIdx.x;
if (baseX < len)
{
d_dst[baseX] = d_dst[baseX] + d_src_1[baseX];
}
}
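// A minimal host-side launch sketch (illustrative assumption -- this file ships
// only the kernel; threads=256 and the device pointers are placeholders):
//   int threads = 256;
//   int blocks = (len + threads - 1) / threads;
//   add<<<blocks, threads>>>(d_dst, d_src_1, len);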
|
3bf2717ffda2186307951f1b85b4c0506401c6c5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "box2d2r-512-10-256_kernel.hu"
__device__ float __sbref_wrap(float *sb, size_t index) { return sb[index]; }
__global__ void kernel0_10(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 10;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 472;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_2_3;
float __reg_2_4;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_3_3;
float __reg_3_4;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_4_3;
float __reg_4_4;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_5_3;
float __reg_5_4;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
float __reg_6_3;
float __reg_6_4;
float __reg_7_0;
float __reg_7_1;
float __reg_7_2;
float __reg_7_3;
float __reg_7_4;
float __reg_8_0;
float __reg_8_1;
float __reg_8_2;
float __reg_8_3;
float __reg_8_4;
float __reg_9_0;
float __reg_9_1;
float __reg_9_2;
float __reg_9_3;
float __reg_9_4;
float __reg_10_0;
float __reg_10_1;
float __reg_10_2;
float __reg_10_3;
float __reg_10_4;
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10);
const AN5D_TYPE __storeValid = __writeValid10;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { float etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { float etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC4(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC5(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC6(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC7(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid7) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC8(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid8) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC9(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid9) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC10(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid10) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
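// Reading the generated code below: __side0Len = 10 time steps of the halo-2
// (5-point-wide) box stencil are fused in registers. Each __CALCk stage keeps a
// rotating window of five row values (__reg_k_0..4); __LOAD feeds stage 1, each
// stage's centre result feeds stage k+1, and __STORE writes only once a row has
// passed through all ten stages -- hence the long pipeline-priming prologue of
// __LOAD/__CALC calls before the first __STORE(2, ...).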
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_0);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_0);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_0);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_0);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_0);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_0);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_0);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_0);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_0);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_0);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_0);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_0);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(2, __reg_10_2);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(3, __reg_10_3);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(4, __reg_10_4);
__LOAD(__reg_0, 25);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(5, __reg_10_0);
__LOAD(__reg_0, 26);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(6, __reg_10_1);
__LOAD(__reg_0, 27);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(7, __reg_10_2);
__LOAD(__reg_0, 28);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(8, __reg_10_3);
__LOAD(__reg_0, 29);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(9, __reg_10_4);
__LOAD(__reg_0, 30);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(10, __reg_10_0);
__LOAD(__reg_0, 31);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(11, __reg_10_1);
__LOAD(__reg_0, 32);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(12, __reg_10_2);
__LOAD(__reg_0, 33);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(13, __reg_10_3);
__LOAD(__reg_0, 34);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(14, __reg_10_4);
__LOAD(__reg_0, 35);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(15, __reg_10_0);
__LOAD(__reg_0, 36);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(16, __reg_10_1);
__LOAD(__reg_0, 37);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(17, __reg_10_2);
__LOAD(__reg_0, 38);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(18, __reg_10_3);
__LOAD(__reg_0, 39);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(19, __reg_10_4);
__LOAD(__reg_0, 40);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(20, __reg_10_0);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__LOAD(__reg_0, 25);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__LOAD(__reg_0, 26);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__LOAD(__reg_0, 27);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__LOAD(__reg_0, 28);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__LOAD(__reg_0, 29);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__LOAD(__reg_0, 30);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__LOAD(__reg_0, 31);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__LOAD(__reg_0, 32);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__LOAD(__reg_0, 33);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__LOAD(__reg_0, 34);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__LOAD(__reg_0, 35);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__LOAD(__reg_0, 36);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__LOAD(__reg_0, 37);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__LOAD(__reg_0, 38);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__LOAD(__reg_0, 39);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__LOAD(__reg_0, 40);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(20, __reg_10_0);
}
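/* [editorial note, inferred from the generated pattern] End of the pipeline
   warm-up: planes up to index 40 have been loaded and pushed through the ten
   fused stencil stages (__CALC1..__CALC10), and the first result plane
   (index 20 = 40 minus 10 stages x halo 2) has just been stored. The next
   statement presumably rebases the shared-memory scratch pointer onto the
   first half of its double buffer before the steady-state loop begins. */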
__a_sb = __a_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
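/* [editorial note] This branch handles the last thread block along the
   streaming dimension, which must also drain the pipeline at the array's
   upper boundary. Steady state runs first: each unrolled step loads one
   plane at __h, rotates the five-register window of every stage, and stores
   the fully updated plane at __h - 20. The body is unrolled 5x so the
   register rotation returns to its starting phase on every iteration. */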
for (__h = 41; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 20, __reg_10_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 20, __reg_10_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 20, __reg_10_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 20, __reg_10_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 20, __reg_10_0);
__h++;
}
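/* [editorial note] Pipeline drain. The chain below dispatches on how many
   input planes remain beyond the steady-state loop; the empty "if (0) {}"
   head is a generator idiom that lets every real case be written as a
   uniform "else if". */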
if (0) {}
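/* Two planes remain: load both with boundary-clamped __CALC1 calls (the
   repeated register arguments implement the clamp at the upper edge), then
   flush stages 2..10 in turn, storing one result plane per step up to
   __h - 1. */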
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 20, __reg_10_1);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 19, __reg_10_2);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 18, __reg_10_3);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 17, __reg_10_4);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 16, __reg_10_0);
__reg_3_1 = __reg_2_1;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 15, __reg_10_1);
__reg_3_2 = __reg_2_2;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 14, __reg_10_2);
__reg_4_1 = __reg_3_1;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 13, __reg_10_3);
__reg_4_2 = __reg_3_2;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 12, __reg_10_4);
__reg_5_1 = __reg_4_1;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_4, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 11, __reg_10_0);
__reg_5_2 = __reg_4_2;
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 10, __reg_10_1);
__reg_6_1 = __reg_5_1;
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_4, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 9, __reg_10_2);
__reg_6_2 = __reg_5_2;
__CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 8, __reg_10_3);
__reg_7_1 = __reg_6_1;
__CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_4, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 7, __reg_10_4);
__reg_7_2 = __reg_6_2;
__CALC8(__reg_8_3, __reg_8_3, __reg_8_3, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 6, __reg_10_0);
__reg_8_1 = __reg_7_1;
__CALC8(__reg_8_4, __reg_8_4, __reg_8_4, __reg_8_4, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 5, __reg_10_1);
__reg_8_2 = __reg_7_2;
__CALC9(__reg_9_3, __reg_9_3, __reg_9_3, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 4, __reg_10_2);
__reg_9_1 = __reg_8_1;
__CALC9(__reg_9_4, __reg_9_4, __reg_9_4, __reg_9_4, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 3, __reg_10_3);
__reg_9_2 = __reg_8_2;
__CALC10(__reg_10_3, __reg_10_3, __reg_10_3, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 2, __reg_10_4);
__CALC10(__reg_10_4, __reg_10_4, __reg_10_4, __reg_10_4, __reg_10_0, __reg_9_2);
__STORE(__h - 1, __reg_10_0);
}
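/* Three planes remain: one more unclamped load than the previous case
   before clamping sets in; the stage-by-stage drain then stores result
   planes up to __h + 0. */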
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 20, __reg_10_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 19, __reg_10_2);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 18, __reg_10_3);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 17, __reg_10_4);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 16, __reg_10_0);
__reg_2_3 = __reg_1_3;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 15, __reg_10_1);
__reg_3_2 = __reg_2_2;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 14, __reg_10_2);
__reg_3_3 = __reg_2_3;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 13, __reg_10_3);
__reg_4_2 = __reg_3_2;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 12, __reg_10_4);
__reg_4_3 = __reg_3_3;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 11, __reg_10_0);
__reg_5_2 = __reg_4_2;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_0, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 10, __reg_10_1);
__reg_5_3 = __reg_4_3;
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 9, __reg_10_2);
__reg_6_2 = __reg_5_2;
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_0, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 8, __reg_10_3);
__reg_6_3 = __reg_5_3;
__CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 7, __reg_10_4);
__reg_7_2 = __reg_6_2;
__CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_0, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 6, __reg_10_0);
__reg_7_3 = __reg_6_3;
__CALC8(__reg_8_4, __reg_8_4, __reg_8_4, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 5, __reg_10_1);
__reg_8_2 = __reg_7_2;
__CALC8(__reg_8_0, __reg_8_0, __reg_8_0, __reg_8_0, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 4, __reg_10_2);
__reg_8_3 = __reg_7_3;
__CALC9(__reg_9_4, __reg_9_4, __reg_9_4, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 3, __reg_10_3);
__reg_9_2 = __reg_8_2;
__CALC9(__reg_9_0, __reg_9_0, __reg_9_0, __reg_9_0, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 2, __reg_10_4);
__reg_9_3 = __reg_8_3;
__CALC10(__reg_10_4, __reg_10_4, __reg_10_4, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 1, __reg_10_0);
__CALC10(__reg_10_0, __reg_10_0, __reg_10_0, __reg_10_0, __reg_10_1, __reg_9_3);
__STORE(__h + 0, __reg_10_1);
}
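/* Four planes remain: clamping starts at the third load (__h + 2); the
   drain stores result planes up to __h + 1. */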
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 20, __reg_10_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 19, __reg_10_2);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 18, __reg_10_3);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 17, __reg_10_4);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 16, __reg_10_0);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 15, __reg_10_1);
__reg_2_4 = __reg_1_4;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 14, __reg_10_2);
__reg_3_3 = __reg_2_3;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 13, __reg_10_3);
__reg_3_4 = __reg_2_4;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 12, __reg_10_4);
__reg_4_3 = __reg_3_3;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 11, __reg_10_0);
__reg_4_4 = __reg_3_4;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 10, __reg_10_1);
__reg_5_3 = __reg_4_3;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_1, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 9, __reg_10_2);
__reg_5_4 = __reg_4_4;
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 8, __reg_10_3);
__reg_6_3 = __reg_5_3;
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_1, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 7, __reg_10_4);
__reg_6_4 = __reg_5_4;
__CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 6, __reg_10_0);
__reg_7_3 = __reg_6_3;
__CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_1, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 5, __reg_10_1);
__reg_7_4 = __reg_6_4;
__CALC8(__reg_8_0, __reg_8_0, __reg_8_0, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 4, __reg_10_2);
__reg_8_3 = __reg_7_3;
__CALC8(__reg_8_1, __reg_8_1, __reg_8_1, __reg_8_1, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 3, __reg_10_3);
__reg_8_4 = __reg_7_4;
__CALC9(__reg_9_0, __reg_9_0, __reg_9_0, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 2, __reg_10_4);
__reg_9_3 = __reg_8_3;
__CALC9(__reg_9_1, __reg_9_1, __reg_9_1, __reg_9_1, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 1, __reg_10_0);
__reg_9_4 = __reg_8_4;
__CALC10(__reg_10_0, __reg_10_0, __reg_10_0, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h + 0, __reg_10_1);
__CALC10(__reg_10_1, __reg_10_1, __reg_10_1, __reg_10_1, __reg_10_2, __reg_9_4);
__STORE(__h + 1, __reg_10_2);
}
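/* Five planes remain: loads __h + 0 .. __h + 4, clamped from the fourth
   load onward; the drain stores result planes up to __h + 2. */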
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 20, __reg_10_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 19, __reg_10_2);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 18, __reg_10_3);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 17, __reg_10_4);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 16, __reg_10_0);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 15, __reg_10_1);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 14, __reg_10_2);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 13, __reg_10_3);
__reg_3_4 = __reg_2_4;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 12, __reg_10_4);
__reg_3_0 = __reg_2_0;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 11, __reg_10_0);
__reg_4_4 = __reg_3_4;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 10, __reg_10_1);
__reg_4_0 = __reg_3_0;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 9, __reg_10_2);
__reg_5_4 = __reg_4_4;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_2, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 8, __reg_10_3);
__reg_5_0 = __reg_4_0;
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 7, __reg_10_4);
__reg_6_4 = __reg_5_4;
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_2, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 6, __reg_10_0);
__reg_6_0 = __reg_5_0;
__CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 5, __reg_10_1);
__reg_7_4 = __reg_6_4;
__CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_2, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 4, __reg_10_2);
__reg_7_0 = __reg_6_0;
__CALC8(__reg_8_1, __reg_8_1, __reg_8_1, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 3, __reg_10_3);
__reg_8_4 = __reg_7_4;
__CALC8(__reg_8_2, __reg_8_2, __reg_8_2, __reg_8_2, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 2, __reg_10_4);
__reg_8_0 = __reg_7_0;
__CALC9(__reg_9_1, __reg_9_1, __reg_9_1, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 1, __reg_10_0);
__reg_9_4 = __reg_8_4;
__CALC9(__reg_9_2, __reg_9_2, __reg_9_2, __reg_9_2, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h + 0, __reg_10_1);
__reg_9_0 = __reg_8_0;
__CALC10(__reg_10_1, __reg_10_1, __reg_10_1, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h + 1, __reg_10_2);
__CALC10(__reg_10_2, __reg_10_2, __reg_10_2, __reg_10_2, __reg_10_3, __reg_9_0);
__STORE(__h + 2, __reg_10_3);
}
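/* Six planes remain (the largest tail case): loads __h + 0 .. __h + 5,
   clamped from the fifth load onward, followed by the same stage-by-stage
   drain as above. */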
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 20, __reg_10_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 19, __reg_10_2);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 18, __reg_10_3);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 17, __reg_10_4);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 16, __reg_10_0);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 15, __reg_10_1);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 14, __reg_10_2);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 13, __reg_10_3);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 12, __reg_10_4);
__reg_3_0 = __reg_2_0;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 11, __reg_10_0);
__reg_3_1 = __reg_2_1;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 10, __reg_10_1);
__reg_4_0 = __reg_3_0;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 9, __reg_10_2);
__reg_4_1 = __reg_3_1;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 8, __reg_10_3);
__reg_5_0 = __reg_4_0;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_3, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 7, __reg_10_4);
__reg_5_1 = __reg_4_1;
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 6, __reg_10_0);
__reg_6_0 = __reg_5_0;
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_3, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 5, __reg_10_1);
__reg_6_1 = __reg_5_1;
__CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 4, __reg_10_2);
__reg_7_0 = __reg_6_0;
__CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_3, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 3, __reg_10_3);
__reg_7_1 = __reg_6_1;
__CALC8(__reg_8_2, __reg_8_2, __reg_8_2, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 2, __reg_10_4);
__reg_8_0 = __reg_7_0;
__CALC8(__reg_8_3, __reg_8_3, __reg_8_3, __reg_8_3, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 1, __reg_10_0);
__reg_8_1 = __reg_7_1;
__CALC9(__reg_9_2, __reg_9_2, __reg_9_2, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h + 0, __reg_10_1);
__reg_9_0 = __reg_8_0;
__CALC9(__reg_9_3, __reg_9_3, __reg_9_3, __reg_9_3, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h + 1, __reg_10_2);
__reg_9_1 = __reg_8_1;
__CALC10(__reg_10_2, __reg_10_2, __reg_10_2, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h + 2, __reg_10_3);
__CALC10(__reg_10_3, __reg_10_3, __reg_10_3, __reg_10_3, __reg_10_4, __reg_9_1);
__STORE(__h + 3, __reg_10_4);
}
}
else
{
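/* Steady state: unrolled by 5 to match the register-rotation period. Each iteration loads one new row and stores the fully computed row 20 rows behind (__halo1 = 2 per step across 10 fused time steps). */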
for (__h = 41; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 20, __reg_10_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 20, __reg_10_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 20, __reg_10_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 20, __reg_10_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 20, __reg_10_0);
__h++;
}
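/* Drain: finish the last few rows one at a time, returning as soon as __h reaches the overlapped tile height __side1LenOl. */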
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 20, __reg_10_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 20, __reg_10_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 20, __reg_10_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 20, __reg_10_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 20, __reg_10_0);
__h++;
}
}
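/* kernel0_9: nine fused time steps (__side0Len = 9) of the same 5x5 radius-2 stencil, streamed over a 256 x 476 tile along c1. Structurally identical to the 10-stage kernel above, with one fewer register pipeline. */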
__global__ void kernel0_9(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 9;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 476;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_2_3;
float __reg_2_4;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_3_3;
float __reg_3_4;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_4_3;
float __reg_4_4;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_5_3;
float __reg_5_4;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
float __reg_6_3;
float __reg_6_4;
float __reg_7_0;
float __reg_7_1;
float __reg_7_2;
float __reg_7_3;
float __reg_7_4;
float __reg_8_0;
float __reg_8_1;
float __reg_8_2;
float __reg_8_3;
float __reg_8_4;
float __reg_9_0;
float __reg_9_1;
float __reg_9_2;
float __reg_9_3;
float __reg_9_4;
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
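/* Shared-memory row buffer, double-buffered: __DB_SWITCH flips __a_sb between the two halves of __a_sb_double each time a new row is staged, so one row can be read while the next is written. */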
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __storeValid = __writeValid9;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1))) + (0.03127f * (__REGREF(__a, 0))) + (0.03128f * (__SBREF(__a_sb, 1))) + (0.03129f * (__SBREF(__a_sb, 2))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (0.03130f * (__SBREF(__a_sb, -2))) + (0.03131f * (__SBREF(__a_sb, -1))) + (0.03132f * (__REGREF(__a, 0))) + (0.03133f * (__SBREF(__a_sb, 1))) + (0.03134f * (__SBREF(__a_sb, 2))); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = (0.03135f * (__SBREF(__a_sb, -2))) + (0.03136f * (__SBREF(__a_sb, -1))) + (0.24712f * (__REGREF(__a, 0))) + (0.03138f * (__SBREF(__a_sb, 1))) + (0.03139f * (__SBREF(__a_sb, 2))); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (0.03140f * (__SBREF(__a_sb, -2))) + (0.03141f * (__SBREF(__a_sb, -1))) + (0.03142f * (__REGREF(__a, 0))) + (0.03143f * (__SBREF(__a_sb, 1))) + (0.03144f * (__SBREF(__a_sb, 2))); } while (0)
#define __CALCEXPR_3(out, a) do { float etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = (0.03145f * (__SBREF(__a_sb, -2))) + (0.03146f * (__SBREF(__a_sb, -1))) + (0.03147f * (__REGREF(__a, 0))) + (0.03148f * (__SBREF(__a_sb, 1))) + (0.03149f * (__SBREF(__a_sb, 2))); } while (0)
#define __CALCEXPR_4(out, a) do { float etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
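/* __CALCn applies the stencil only for lanes whose n-th write is still inside the shrinking valid window (__writeValidn); other lanes forward the input value unchanged, so out-of-range data is merely delayed by one stage. */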
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC4(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC5(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC6(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC7(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid7) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC8(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid8) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC9(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid9) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
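/* Top-boundary blocks (__c1Id == 0): prime all nine pipelines from row 0. Rows 0 and 1 are fed to every stage at once, which appears to implement the top boundary condition of the generated scheme. */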
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_0);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_0);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_0);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_0);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_0);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_0);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_0);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_0);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_0);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_0);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(2, __reg_9_2);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(3, __reg_9_3);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(4, __reg_9_4);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(5, __reg_9_0);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(6, __reg_9_1);
__LOAD(__reg_0, 25);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(7, __reg_9_2);
__LOAD(__reg_0, 26);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(8, __reg_9_3);
__LOAD(__reg_0, 27);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(9, __reg_9_4);
__LOAD(__reg_0, 28);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(10, __reg_9_0);
__LOAD(__reg_0, 29);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(11, __reg_9_1);
__LOAD(__reg_0, 30);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(12, __reg_9_2);
__LOAD(__reg_0, 31);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(13, __reg_9_3);
__LOAD(__reg_0, 32);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(14, __reg_9_4);
__LOAD(__reg_0, 33);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(15, __reg_9_0);
__LOAD(__reg_0, 34);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(16, __reg_9_1);
__LOAD(__reg_0, 35);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(17, __reg_9_2);
__LOAD(__reg_0, 36);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(18, __reg_9_3);
}
else
{
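/* Interior blocks: re-read the overlapping halo rows and warm the stages up incrementally; no __STORE is issued until every pipeline stage holds valid data. */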
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__LOAD(__reg_0, 25);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__LOAD(__reg_0, 26);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__LOAD(__reg_0, 27);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__LOAD(__reg_0, 28);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__LOAD(__reg_0, 29);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__LOAD(__reg_0, 30);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__LOAD(__reg_0, 31);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__LOAD(__reg_0, 32);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__LOAD(__reg_0, 33);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__LOAD(__reg_0, 34);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__LOAD(__reg_0, 35);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__LOAD(__reg_0, 36);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(18, __reg_9_3);
}
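// Warm-up complete: switch the shared-memory working pointer to the second
// half of the double buffer before entering the steady-state loop.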
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
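// Last thread block along the streamed dimension: run the steady-state loop
// while full five-row batches remain, then drain the pipeline through one of
// the remainder cases below (2 to 6 rows left), which flush the in-flight
// stages using repeated register arguments, presumably clamping at the boundary.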
for (__h = 37; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
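// Each pass loads five consecutive rows and pushes them through the nine fused
// stencil stages (__CALC1..__CALC9); results land 18 rows behind the load
// front (__STORE(__h - 18, ...)), consistent with two halo rows per stage.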
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 18, __reg_9_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 18, __reg_9_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 18, __reg_9_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 18, __reg_9_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 18, __reg_9_3);
__h++;
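// Five rows consumed: swap the double-buffer halves and synchronize the block
// before the next batch.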
__DB_SWITCH(); __syncthreads();
}
if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
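// Drain case: exactly two input rows remain for this block.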
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 18, __reg_9_4);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 17, __reg_9_0);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 16, __reg_9_1);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 15, __reg_9_2);
__reg_2_3 = __reg_1_3;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 14, __reg_9_3);
__reg_3_2 = __reg_2_2;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 13, __reg_9_4);
__reg_3_3 = __reg_2_3;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 12, __reg_9_0);
__reg_4_2 = __reg_3_2;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 11, __reg_9_1);
__reg_4_3 = __reg_3_3;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 10, __reg_9_2);
__reg_5_2 = __reg_4_2;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_0, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 9, __reg_9_3);
__reg_5_3 = __reg_4_3;
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 8, __reg_9_4);
__reg_6_2 = __reg_5_2;
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_0, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 7, __reg_9_0);
__reg_6_3 = __reg_5_3;
__CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 6, __reg_9_1);
__reg_7_2 = __reg_6_2;
__CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_0, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 5, __reg_9_2);
__reg_7_3 = __reg_6_3;
__CALC8(__reg_8_4, __reg_8_4, __reg_8_4, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 4, __reg_9_3);
__reg_8_2 = __reg_7_2;
__CALC8(__reg_8_0, __reg_8_0, __reg_8_0, __reg_8_0, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 3, __reg_9_4);
__reg_8_3 = __reg_7_3;
__CALC9(__reg_9_4, __reg_9_4, __reg_9_4, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 2, __reg_9_0);
__CALC9(__reg_9_0, __reg_9_0, __reg_9_0, __reg_9_0, __reg_9_1, __reg_8_3);
__STORE(__h - 1, __reg_9_1);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
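// Drain case: three input rows remain.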
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 18, __reg_9_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 17, __reg_9_0);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 16, __reg_9_1);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 15, __reg_9_2);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 14, __reg_9_3);
__reg_2_4 = __reg_1_4;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 13, __reg_9_4);
__reg_3_3 = __reg_2_3;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 12, __reg_9_0);
__reg_3_4 = __reg_2_4;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 11, __reg_9_1);
__reg_4_3 = __reg_3_3;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 10, __reg_9_2);
__reg_4_4 = __reg_3_4;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 9, __reg_9_3);
__reg_5_3 = __reg_4_3;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_1, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 8, __reg_9_4);
__reg_5_4 = __reg_4_4;
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 7, __reg_9_0);
__reg_6_3 = __reg_5_3;
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_1, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 6, __reg_9_1);
__reg_6_4 = __reg_5_4;
__CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 5, __reg_9_2);
__reg_7_3 = __reg_6_3;
__CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_1, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 4, __reg_9_3);
__reg_7_4 = __reg_6_4;
__CALC8(__reg_8_0, __reg_8_0, __reg_8_0, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 3, __reg_9_4);
__reg_8_3 = __reg_7_3;
__CALC8(__reg_8_1, __reg_8_1, __reg_8_1, __reg_8_1, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 2, __reg_9_0);
__reg_8_4 = __reg_7_4;
__CALC9(__reg_9_0, __reg_9_0, __reg_9_0, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 1, __reg_9_1);
__CALC9(__reg_9_1, __reg_9_1, __reg_9_1, __reg_9_1, __reg_9_2, __reg_8_4);
__STORE(__h + 0, __reg_9_2);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
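// Drain case: four input rows remain.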
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 18, __reg_9_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 17, __reg_9_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 16, __reg_9_1);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 15, __reg_9_2);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 14, __reg_9_3);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 13, __reg_9_4);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 12, __reg_9_0);
__reg_3_4 = __reg_2_4;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 11, __reg_9_1);
__reg_3_0 = __reg_2_0;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 10, __reg_9_2);
__reg_4_4 = __reg_3_4;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 9, __reg_9_3);
__reg_4_0 = __reg_3_0;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 8, __reg_9_4);
__reg_5_4 = __reg_4_4;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_2, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 7, __reg_9_0);
__reg_5_0 = __reg_4_0;
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 6, __reg_9_1);
__reg_6_4 = __reg_5_4;
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_2, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 5, __reg_9_2);
__reg_6_0 = __reg_5_0;
__CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 4, __reg_9_3);
__reg_7_4 = __reg_6_4;
__CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_2, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 3, __reg_9_4);
__reg_7_0 = __reg_6_0;
__CALC8(__reg_8_1, __reg_8_1, __reg_8_1, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 2, __reg_9_0);
__reg_8_4 = __reg_7_4;
__CALC8(__reg_8_2, __reg_8_2, __reg_8_2, __reg_8_2, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 1, __reg_9_1);
__reg_8_0 = __reg_7_0;
__CALC9(__reg_9_1, __reg_9_1, __reg_9_1, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h + 0, __reg_9_2);
__CALC9(__reg_9_2, __reg_9_2, __reg_9_2, __reg_9_2, __reg_9_3, __reg_8_0);
__STORE(__h + 1, __reg_9_3);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
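// Drain case: five input rows remain.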
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 18, __reg_9_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 17, __reg_9_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 16, __reg_9_1);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 15, __reg_9_2);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 14, __reg_9_3);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 13, __reg_9_4);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 12, __reg_9_0);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 11, __reg_9_1);
__reg_3_0 = __reg_2_0;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 10, __reg_9_2);
__reg_3_1 = __reg_2_1;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 9, __reg_9_3);
__reg_4_0 = __reg_3_0;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 8, __reg_9_4);
__reg_4_1 = __reg_3_1;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 7, __reg_9_0);
__reg_5_0 = __reg_4_0;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_3, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 6, __reg_9_1);
__reg_5_1 = __reg_4_1;
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 5, __reg_9_2);
__reg_6_0 = __reg_5_0;
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_3, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 4, __reg_9_3);
__reg_6_1 = __reg_5_1;
__CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 3, __reg_9_4);
__reg_7_0 = __reg_6_0;
__CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_3, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 2, __reg_9_0);
__reg_7_1 = __reg_6_1;
__CALC8(__reg_8_2, __reg_8_2, __reg_8_2, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 1, __reg_9_1);
__reg_8_0 = __reg_7_0;
__CALC8(__reg_8_3, __reg_8_3, __reg_8_3, __reg_8_3, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h + 0, __reg_9_2);
__reg_8_1 = __reg_7_1;
__CALC9(__reg_9_2, __reg_9_2, __reg_9_2, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h + 1, __reg_9_3);
__CALC9(__reg_9_3, __reg_9_3, __reg_9_3, __reg_9_3, __reg_9_4, __reg_8_1);
__STORE(__h + 2, __reg_9_4);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
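// Drain case: six input rows remain.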
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 18, __reg_9_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 17, __reg_9_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 16, __reg_9_1);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 15, __reg_9_2);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 14, __reg_9_3);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 13, __reg_9_4);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 12, __reg_9_0);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 11, __reg_9_1);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 10, __reg_9_2);
__reg_3_1 = __reg_2_1;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 9, __reg_9_3);
__reg_3_2 = __reg_2_2;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 8, __reg_9_4);
__reg_4_1 = __reg_3_1;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 7, __reg_9_0);
__reg_4_2 = __reg_3_2;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 6, __reg_9_1);
__reg_5_1 = __reg_4_1;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_4, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 5, __reg_9_2);
__reg_5_2 = __reg_4_2;
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 4, __reg_9_3);
__reg_6_1 = __reg_5_1;
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_4, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 3, __reg_9_4);
__reg_6_2 = __reg_5_2;
__CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 2, __reg_9_0);
__reg_7_1 = __reg_6_1;
__CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_4, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 1, __reg_9_1);
__reg_7_2 = __reg_6_2;
__CALC8(__reg_8_3, __reg_8_3, __reg_8_3, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h + 0, __reg_9_2);
__reg_8_1 = __reg_7_1;
__CALC8(__reg_8_4, __reg_8_4, __reg_8_4, __reg_8_4, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h + 1, __reg_9_3);
__reg_8_2 = __reg_7_2;
__CALC9(__reg_9_3, __reg_9_3, __reg_9_3, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h + 2, __reg_9_4);
__CALC9(__reg_9_4, __reg_9_4, __reg_9_4, __reg_9_4, __reg_9_0, __reg_8_2);
__STORE(__h + 3, __reg_9_0);
}
}
else
{
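    // Interior c1-tiles of this nine-stage variant (__CALC1..__CALC9): stream
    // five rows per loop iteration, one per rotation phase of the five-slot
    // register windows, committing row __h - 18 (2 halo rows * 9 fused steps
    // behind the load) each time.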
for (__h = 37; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 18, __reg_9_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 18, __reg_9_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 18, __reg_9_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 18, __reg_9_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 18, __reg_9_3);
__h++;
__DB_SWITCH(); __syncthreads();
}
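    // Tail: guarded single-row iterations finish any remainder left when the
    // tile's row count is not a multiple of the five-way unroll; each guard
    // returns as soon as the overlapped tile height __side1LenOl is reached.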
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 18, __reg_9_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 18, __reg_9_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 18, __reg_9_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 18, __reg_9_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 18, __reg_9_3);
__h++;
}
}
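// kernel0_8: AN5D-generated radius-2 (5x5) stencil kernel that fuses eight
// time steps per launch. Eight register-pipeline stages (__CALC1..__CALC8)
// each advance the tile by one time step; only rows that are valid after all
// eight steps are written back by __STORE.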
__global__ void kernel0_8(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 480;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
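  // __reg_0 holds the freshly loaded input row; each stage k keeps a
  // five-slot rotating window __reg_k_0..__reg_k_4 of in-flight outputs so
  // the radius-2 neighbourhood along c1 stays resident in registers.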
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_2_3;
float __reg_2_4;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_3_3;
float __reg_3_4;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_4_3;
float __reg_4_4;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_5_3;
float __reg_5_4;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
float __reg_6_3;
float __reg_6_4;
float __reg_7_0;
float __reg_7_1;
float __reg_7_2;
float __reg_7_3;
float __reg_7_4;
float __reg_8_0;
float __reg_8_1;
float __reg_8_2;
float __reg_8_3;
float __reg_8_4;
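  // Two shared-memory buffers (ping-ponged via __DB_SWITCH) stage the current
  // row so threads can read their c2-neighbours; double buffering lets a new
  // row be staged while the previous one may still be in use.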
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
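  // Domain and overlap masks: __loadValid/__updateValid clip to the padded
  // problem domain; __writeValidK shrinks the active c2 range by one halo
  // (2 columns) per fused step, and only __writeValid8 survivors store.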
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __storeValid = __writeValid8;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
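  // __LOAD/__STORE ping-pong between the two time planes of A (parity of c0).
  // Each __CALCEXPR_i contributes one stencil row: __CALCEXPR_0 initialises an
  // output and __CALCEXPR_1..4 accumulate into it, so a stage's five rotating
  // outputs gather all five rows over five consecutive input rows.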
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __CALCEXPR_3(out, a) do { float etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __CALCEXPR_4(out, a) do { float etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC4(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC5(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC6(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC7(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid7) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC8(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid8) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
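  // Prologue: the first c1-tile replays the top boundary rows through all
  // eight stages at once and can already commit rows 2..16; other tiles only
  // prime the pipeline and commit row 16 before entering the main loop.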
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_0);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_0);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_0);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_0);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_0);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_0);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_0);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_0);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(2, __reg_8_2);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(3, __reg_8_3);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(4, __reg_8_4);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(5, __reg_8_0);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(6, __reg_8_1);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(7, __reg_8_2);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(8, __reg_8_3);
__LOAD(__reg_0, 25);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(9, __reg_8_4);
__LOAD(__reg_0, 26);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(10, __reg_8_0);
__LOAD(__reg_0, 27);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(11, __reg_8_1);
__LOAD(__reg_0, 28);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(12, __reg_8_2);
__LOAD(__reg_0, 29);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(13, __reg_8_3);
__LOAD(__reg_0, 30);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(14, __reg_8_4);
__LOAD(__reg_0, 31);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(15, __reg_8_0);
__LOAD(__reg_0, 32);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(16, __reg_8_1);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__LOAD(__reg_0, 25);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__LOAD(__reg_0, 26);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__LOAD(__reg_0, 27);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__LOAD(__reg_0, 28);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__LOAD(__reg_0, 29);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__LOAD(__reg_0, 30);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__LOAD(__reg_0, 31);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__LOAD(__reg_0, 32);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(16, __reg_8_1);
}
__a_sb = __a_sb_double + __blockSize * 0;
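  // Steady state: the last c1-tile (below) runs the five-row unrolled loop
  // until the tile end approaches and then drains the pipeline; interior
  // tiles mirror the else branch of the nine-stage kernel above and keep
  // streaming full iterations.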
if (__c1Id == __side1Num - 1)
{
for (__h = 33; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 16, __reg_8_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 16, __reg_8_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 16, __reg_8_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 16, __reg_8_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 16, __reg_8_1);
__h++;
}
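    // Drain: once fewer than five rows remain, pick the epilogue matching the
    // distance to the tile end (__h + 2, __h + 3, ...). The repeated register
    // arguments in the __CALCk calls below effectively discard contributions
    // past the bottom edge while stages 1..8 are flushed one by one.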
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 16, __reg_8_2);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 15, __reg_8_3);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 14, __reg_8_4);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 13, __reg_8_0);
__reg_2_4 = __reg_1_4;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 12, __reg_8_1);
__reg_3_3 = __reg_2_3;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 11, __reg_8_2);
__reg_3_4 = __reg_2_4;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 10, __reg_8_3);
__reg_4_3 = __reg_3_3;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 9, __reg_8_4);
__reg_4_4 = __reg_3_4;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 8, __reg_8_0);
__reg_5_3 = __reg_4_3;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_1, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 7, __reg_8_1);
__reg_5_4 = __reg_4_4;
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 6, __reg_8_2);
__reg_6_3 = __reg_5_3;
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_1, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 5, __reg_8_3);
__reg_6_4 = __reg_5_4;
__CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 4, __reg_8_4);
__reg_7_3 = __reg_6_3;
__CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_1, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 3, __reg_8_0);
__reg_7_4 = __reg_6_4;
__CALC8(__reg_8_0, __reg_8_0, __reg_8_0, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 2, __reg_8_1);
__CALC8(__reg_8_1, __reg_8_1, __reg_8_1, __reg_8_1, __reg_8_2, __reg_7_4);
__STORE(__h - 1, __reg_8_2);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 16, __reg_8_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 15, __reg_8_3);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 14, __reg_8_4);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 13, __reg_8_0);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 12, __reg_8_1);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 11, __reg_8_2);
__reg_3_4 = __reg_2_4;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 10, __reg_8_3);
__reg_3_0 = __reg_2_0;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 9, __reg_8_4);
__reg_4_4 = __reg_3_4;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 8, __reg_8_0);
__reg_4_0 = __reg_3_0;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 7, __reg_8_1);
__reg_5_4 = __reg_4_4;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_2, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 6, __reg_8_2);
__reg_5_0 = __reg_4_0;
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 5, __reg_8_3);
__reg_6_4 = __reg_5_4;
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_2, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 4, __reg_8_4);
__reg_6_0 = __reg_5_0;
__CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 3, __reg_8_0);
__reg_7_4 = __reg_6_4;
__CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_2, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 2, __reg_8_1);
__reg_7_0 = __reg_6_0;
__CALC8(__reg_8_1, __reg_8_1, __reg_8_1, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 1, __reg_8_2);
__CALC8(__reg_8_2, __reg_8_2, __reg_8_2, __reg_8_2, __reg_8_3, __reg_7_0);
__STORE(__h + 0, __reg_8_3);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
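// Drain case: four input rows (__h+0 .. __h+3) remain.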
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 16, __reg_8_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 15, __reg_8_3);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 14, __reg_8_4);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 13, __reg_8_0);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 12, __reg_8_1);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 11, __reg_8_2);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 10, __reg_8_3);
__reg_3_0 = __reg_2_0;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 9, __reg_8_4);
__reg_3_1 = __reg_2_1;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 8, __reg_8_0);
__reg_4_0 = __reg_3_0;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 7, __reg_8_1);
__reg_4_1 = __reg_3_1;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 6, __reg_8_2);
__reg_5_0 = __reg_4_0;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_3, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 5, __reg_8_3);
__reg_5_1 = __reg_4_1;
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 4, __reg_8_4);
__reg_6_0 = __reg_5_0;
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_3, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 3, __reg_8_0);
__reg_6_1 = __reg_5_1;
__CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 2, __reg_8_1);
__reg_7_0 = __reg_6_0;
__CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_3, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 1, __reg_8_2);
__reg_7_1 = __reg_6_1;
__CALC8(__reg_8_2, __reg_8_2, __reg_8_2, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h + 0, __reg_8_3);
__CALC8(__reg_8_3, __reg_8_3, __reg_8_3, __reg_8_3, __reg_8_4, __reg_7_1);
__STORE(__h + 1, __reg_8_4);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
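// Drain case: five input rows (__h+0 .. __h+4) remain.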
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 16, __reg_8_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 15, __reg_8_3);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 14, __reg_8_4);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 13, __reg_8_0);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 12, __reg_8_1);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 11, __reg_8_2);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 10, __reg_8_3);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 9, __reg_8_4);
__reg_3_1 = __reg_2_1;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 8, __reg_8_0);
__reg_3_2 = __reg_2_2;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 7, __reg_8_1);
__reg_4_1 = __reg_3_1;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 6, __reg_8_2);
__reg_4_2 = __reg_3_2;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 5, __reg_8_3);
__reg_5_1 = __reg_4_1;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_4, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 4, __reg_8_4);
__reg_5_2 = __reg_4_2;
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 3, __reg_8_0);
__reg_6_1 = __reg_5_1;
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_4, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 2, __reg_8_1);
__reg_6_2 = __reg_5_2;
__CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 1, __reg_8_2);
__reg_7_1 = __reg_6_1;
__CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_4, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h + 0, __reg_8_3);
__reg_7_2 = __reg_6_2;
__CALC8(__reg_8_3, __reg_8_3, __reg_8_3, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h + 1, __reg_8_4);
__CALC8(__reg_8_4, __reg_8_4, __reg_8_4, __reg_8_4, __reg_8_0, __reg_7_2);
__STORE(__h + 2, __reg_8_0);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
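// Drain case: six input rows (__h+0 .. __h+5) remain.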
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 16, __reg_8_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 15, __reg_8_3);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 14, __reg_8_4);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 13, __reg_8_0);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 12, __reg_8_1);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 11, __reg_8_2);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 10, __reg_8_3);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 9, __reg_8_4);
__reg_2_3 = __reg_1_3;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 8, __reg_8_0);
__reg_3_2 = __reg_2_2;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 7, __reg_8_1);
__reg_3_3 = __reg_2_3;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 6, __reg_8_2);
__reg_4_2 = __reg_3_2;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 5, __reg_8_3);
__reg_4_3 = __reg_3_3;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 4, __reg_8_4);
__reg_5_2 = __reg_4_2;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_0, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 3, __reg_8_0);
__reg_5_3 = __reg_4_3;
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 2, __reg_8_1);
__reg_6_2 = __reg_5_2;
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_0, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 1, __reg_8_2);
__reg_6_3 = __reg_5_3;
__CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h + 0, __reg_8_3);
__reg_7_2 = __reg_6_2;
__CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_0, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h + 1, __reg_8_4);
__reg_7_3 = __reg_6_3;
__CALC8(__reg_8_4, __reg_8_4, __reg_8_4, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h + 2, __reg_8_0);
__CALC8(__reg_8_0, __reg_8_0, __reg_8_0, __reg_8_0, __reg_8_1, __reg_7_3);
__STORE(__h + 3, __reg_8_1);
}
}
else
{
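// No tail drain needed on this path: stream until the overlapped tile height
// __side1LenOl is exhausted. The loop is unrolled 5x to match the five-register
// rotation of each pipeline stage; every iteration loads five rows and stores five
// results 16 rows behind the load front.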
for (__h = 33; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 16, __reg_8_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 16, __reg_8_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 16, __reg_8_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 16, __reg_8_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 16, __reg_8_1);
__h++;
}
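// Mop up the last few rows one at a time, returning as soon as __h reaches the
// overlapped tile height __side1LenOl.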
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 16, __reg_8_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 16, __reg_8_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 16, __reg_8_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 16, __reg_8_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 16, __reg_8_1);
__h++;
}
}
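/*
 * kernel0_7: the same 25-point (5x5, radius-2) single-precision stencil, here fusing
 * __side0Len = 7 time steps per launch. Each thread owns one column of a 256 x 484
 * tile and sweeps down it, keeping seven pipeline stages (__reg_1_* .. __reg_7_*) of
 * five rotating row registers each; AN5D-style generated streaming code, hence the
 * unrolled repetition below.
 */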
__global__ void kernel0_7(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 484;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
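// Per-thread pipeline state: seven stages x five rotating row registers
// (35 values in flight, plus the freshly loaded __reg_0).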
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_2_3;
float __reg_2_4;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_3_3;
float __reg_3_4;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_4_3;
float __reg_4_4;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_5_3;
float __reg_5_4;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
float __reg_6_3;
float __reg_6_4;
float __reg_7_0;
float __reg_7_1;
float __reg_7_2;
float __reg_7_3;
float __reg_7_4;
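// Double-buffered shared-memory line buffer: __DB_SWITCH() flips halves on every
// __CALCSETUP, presumably so a stage can publish its row without a second barrier
// against threads still reading the previous one.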
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
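// Valid-range predicates: loads are allowed over the full padded column range, while
// stage k may write only where at least k * __halo2 columns of overlap remain on each
// side (the usual trapezoidal shrink of fused time steps).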
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __storeValid = __writeValid7;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
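// __LOAD reads row h from the (__c0 % 2) half of the time-double-buffered array A;
// __STORE writes a finished row to the other half. __CALCEXPR splits the 25 stencil
// coefficients into five per-input-row partial sums, one for each output row still in
// flight, and __CALCk applies them for stage k where __writeValidk holds, otherwise
// just forwarding the input value (out2 = reg).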
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (0.03125f * __SBREF(__a_sb, -2)) + (0.03126f * __SBREF(__a_sb, -1)) + (0.03127f * __REGREF(__a, 0)) + (0.03128f * __SBREF(__a_sb, 1)) + (0.03129f * __SBREF(__a_sb, 2)); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (0.03130f * __SBREF(__a_sb, -2)) + (0.03131f * __SBREF(__a_sb, -1)) + (0.03132f * __REGREF(__a, 0)) + (0.03133f * __SBREF(__a_sb, 1)) + (0.03134f * __SBREF(__a_sb, 2)); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = (0.03135f * __SBREF(__a_sb, -2)) + (0.03136f * __SBREF(__a_sb, -1)) + (0.24712f * __REGREF(__a, 0)) + (0.03138f * __SBREF(__a_sb, 1)) + (0.03139f * __SBREF(__a_sb, 2)); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (0.03140f * __SBREF(__a_sb, -2)) + (0.03141f * __SBREF(__a_sb, -1)) + (0.03142f * __REGREF(__a, 0)) + (0.03143f * __SBREF(__a_sb, 1)) + (0.03144f * __SBREF(__a_sb, 2)); } while (0)
#define __CALCEXPR_3(out, a) do { float etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = (0.03145f * __SBREF(__a_sb, -2)) + (0.03146f * __SBREF(__a_sb, -1)) + (0.03147f * __REGREF(__a, 0)) + (0.03148f * __SBREF(__a_sb, 1)) + (0.03149f * __SBREF(__a_sb, 2)); } while (0)
#define __CALCEXPR_4(out, a) do { float etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC4(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC5(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC6(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC7(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid7) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
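// First row-tile (__c1Id == 0): warm up the pipeline by feeding the two top padding
// rows into every stage before regular streaming begins.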
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_0);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_0);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_0);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_0);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_0);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_0);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(2, __reg_7_2);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(3, __reg_7_3);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(4, __reg_7_4);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(5, __reg_7_0);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(6, __reg_7_1);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(7, __reg_7_2);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(8, __reg_7_3);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(9, __reg_7_4);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(10, __reg_7_0);
__LOAD(__reg_0, 25);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(11, __reg_7_1);
__LOAD(__reg_0, 26);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(12, __reg_7_2);
__LOAD(__reg_0, 27);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(13, __reg_7_3);
__LOAD(__reg_0, 28);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(14, __reg_7_4);
}
else
{
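// Warm-up for tiles that do not own the top boundary: rows 0..28 are loaded
// and pushed through all seven pipeline stages without intermediate stores;
// only the first fully computed row (offset 14) is written, since the rows
// above it are overlap recomputed by the neighbouring tile.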
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__LOAD(__reg_0, 25);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__LOAD(__reg_0, 26);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__LOAD(__reg_0, 27);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__LOAD(__reg_0, 28);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(14, __reg_7_4);
}
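// Point the shared-memory window at the second buffer half before entering
// the steady-state phase.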
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
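// Last tile along c1: run the 5-row unrolled steady-state loop, then drain
// the pipeline through the boundary-clamped epilogue arms below.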
for (__h = 29; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 14, __reg_7_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 14, __reg_7_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 14, __reg_7_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 14, __reg_7_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 14, __reg_7_4);
__h++;
__DB_SWITCH(); __syncthreads();
}
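// Generated else-if chain; the dead "if (0)" arm merely anchors it. Each arm
// handles one possible count of leftover rows, and repeating a register
// operand in a __CALCn call clamps the stencil at the bottom boundary while
// the pipeline drains.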
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 14, __reg_7_0);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 13, __reg_7_1);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 12, __reg_7_2);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 11, __reg_7_3);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 10, __reg_7_4);
__reg_3_4 = __reg_2_4;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 9, __reg_7_0);
__reg_3_0 = __reg_2_0;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 8, __reg_7_1);
__reg_4_4 = __reg_3_4;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 7, __reg_7_2);
__reg_4_0 = __reg_3_0;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 6, __reg_7_3);
__reg_5_4 = __reg_4_4;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_2, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 5, __reg_7_4);
__reg_5_0 = __reg_4_0;
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 4, __reg_7_0);
__reg_6_4 = __reg_5_4;
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_2, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 3, __reg_7_1);
__reg_6_0 = __reg_5_0;
__CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 2, __reg_7_2);
__CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_2, __reg_7_3, __reg_6_0);
__STORE(__h - 1, __reg_7_3);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 14, __reg_7_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 13, __reg_7_1);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 12, __reg_7_2);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 11, __reg_7_3);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 10, __reg_7_4);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 9, __reg_7_0);
__reg_3_0 = __reg_2_0;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 8, __reg_7_1);
__reg_3_1 = __reg_2_1;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 7, __reg_7_2);
__reg_4_0 = __reg_3_0;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 6, __reg_7_3);
__reg_4_1 = __reg_3_1;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 5, __reg_7_4);
__reg_5_0 = __reg_4_0;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_3, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 4, __reg_7_0);
__reg_5_1 = __reg_4_1;
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 3, __reg_7_1);
__reg_6_0 = __reg_5_0;
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_3, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 2, __reg_7_2);
__reg_6_1 = __reg_5_1;
__CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 1, __reg_7_3);
__CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_3, __reg_7_4, __reg_6_1);
__STORE(__h + 0, __reg_7_4);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 14, __reg_7_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 13, __reg_7_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 12, __reg_7_2);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 11, __reg_7_3);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 10, __reg_7_4);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 9, __reg_7_0);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 8, __reg_7_1);
__reg_3_1 = __reg_2_1;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 7, __reg_7_2);
__reg_3_2 = __reg_2_2;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 6, __reg_7_3);
__reg_4_1 = __reg_3_1;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 5, __reg_7_4);
__reg_4_2 = __reg_3_2;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 4, __reg_7_0);
__reg_5_1 = __reg_4_1;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_4, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 3, __reg_7_1);
__reg_5_2 = __reg_4_2;
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 2, __reg_7_2);
__reg_6_1 = __reg_5_1;
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_4, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 1, __reg_7_3);
__reg_6_2 = __reg_5_2;
__CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h + 0, __reg_7_4);
__CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_4, __reg_7_0, __reg_6_2);
__STORE(__h + 1, __reg_7_0);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 14, __reg_7_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 13, __reg_7_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 12, __reg_7_2);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 11, __reg_7_3);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 10, __reg_7_4);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 9, __reg_7_0);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 8, __reg_7_1);
__reg_2_3 = __reg_1_3;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 7, __reg_7_2);
__reg_3_2 = __reg_2_2;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 6, __reg_7_3);
__reg_3_3 = __reg_2_3;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 5, __reg_7_4);
__reg_4_2 = __reg_3_2;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 4, __reg_7_0);
__reg_4_3 = __reg_3_3;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 3, __reg_7_1);
__reg_5_2 = __reg_4_2;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_0, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 2, __reg_7_2);
__reg_5_3 = __reg_4_3;
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 1, __reg_7_3);
__reg_6_2 = __reg_5_2;
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_0, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h + 0, __reg_7_4);
__reg_6_3 = __reg_5_3;
__CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h + 1, __reg_7_0);
__CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_0, __reg_7_1, __reg_6_3);
__STORE(__h + 2, __reg_7_1);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 14, __reg_7_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 13, __reg_7_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 12, __reg_7_2);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 11, __reg_7_3);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 10, __reg_7_4);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 9, __reg_7_0);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 8, __reg_7_1);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 7, __reg_7_2);
__reg_2_4 = __reg_1_4;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 6, __reg_7_3);
__reg_3_3 = __reg_2_3;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 5, __reg_7_4);
__reg_3_4 = __reg_2_4;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 4, __reg_7_0);
__reg_4_3 = __reg_3_3;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 3, __reg_7_1);
__reg_4_4 = __reg_3_4;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 2, __reg_7_2);
__reg_5_3 = __reg_4_3;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_1, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 1, __reg_7_3);
__reg_5_4 = __reg_4_4;
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h + 0, __reg_7_4);
__reg_6_3 = __reg_5_3;
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_1, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h + 1, __reg_7_0);
__reg_6_4 = __reg_5_4;
__CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h + 2, __reg_7_1);
__CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_1, __reg_7_2, __reg_6_4);
__STORE(__h + 3, __reg_7_2);
}
}
else
{
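// Interior tiles: the same 5-row unrolled steady-state loop, followed by a
// per-row drain that returns as soon as __h reaches __side1LenOl.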
for (__h = 29; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 14, __reg_7_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 14, __reg_7_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 14, __reg_7_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 14, __reg_7_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 14, __reg_7_4);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 14, __reg_7_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 14, __reg_7_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 14, __reg_7_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 14, __reg_7_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 14, __reg_7_4);
__h++;
}
}
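// kernel0_6: degree-6 temporal-blocking variant of the same radius-2, 5x5
// box stencil. Six timesteps are fused per launch (__side0Len = 6) over a
// 256 x 488 tile, at the cost of a 12-cell recomputed overlap per side
// (__halo * __side0Len) along each axis.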
__global__ void kernel0_6(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
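// With __side0Len = 6: __OlLen2 = 12 and __side2LenOl = 488 + 2 * 12 = 512,
// the working width of the thread block.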
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_2_3;
float __reg_2_4;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_3_3;
float __reg_3_4;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_4_3;
float __reg_4_4;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_5_3;
float __reg_5_4;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
float __reg_6_3;
float __reg_6_4;
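// The named registers above hold the 5-deep row window of each pipeline
// stage; the shared array below is double-buffered so the row being staged
// for h+1 does not overwrite the row still being read for h.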
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
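// Validity masks: loads may reach into the halo, stage k may only update
// lanes at least k halo-widths from the tile edge along c2, and only the
// deepest stage (__writeValid6) stores its result.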
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __storeValid = __writeValid6;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
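// A holds two time planes: (__c0 % 2) selects the plane read by __LOAD and
// __DEST writes the ((c0 + 1) % 2) plane.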
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (0.03125f * __SBREF(__a_sb, -2)) + (0.03126f * __SBREF(__a_sb, -1)) + (0.03127f * __REGREF(__a, 0)) + (0.03128f * __SBREF(__a_sb, 1)) + (0.03129f * __SBREF(__a_sb, 2)); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (0.03130f * __SBREF(__a_sb, -2)) + (0.03131f * __SBREF(__a_sb, -1)) + (0.03132f * __REGREF(__a, 0)) + (0.03133f * __SBREF(__a_sb, 1)) + (0.03134f * __SBREF(__a_sb, 2)); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = (0.03135f * __SBREF(__a_sb, -2)) + (0.03136f * __SBREF(__a_sb, -1)) + (0.24712f * __REGREF(__a, 0)) + (0.03138f * __SBREF(__a_sb, 1)) + (0.03139f * __SBREF(__a_sb, 2)); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (0.03140f * __SBREF(__a_sb, -2)) + (0.03141f * __SBREF(__a_sb, -1)) + (0.03142f * __REGREF(__a, 0)) + (0.03143f * __SBREF(__a_sb, 1)) + (0.03144f * __SBREF(__a_sb, 2)); } while (0)
#define __CALCEXPR_3(out, a) do { float etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = (0.03145f * __SBREF(__a_sb, -2)) + (0.03146f * __SBREF(__a_sb, -1)) + (0.03147f * __REGREF(__a, 0)) + (0.03148f * __SBREF(__a_sb, 1)) + (0.03149f * __SBREF(__a_sb, 2)); } while (0)
#define __CALCEXPR_4(out, a) do { float etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC4(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC5(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC6(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
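/*
 * Pipeline overview (derived from the macros above): __CALC1..__CALC6
 * fuse six time steps of a 5x5 stencil in registers. Every loaded row is
 * broadcast through __CALCEXPR, which accumulates its five row
 * contributions (off-center weights ~0.031f, center 0.24712f) into five
 * in-flight output rows; each stage rotates five registers
 * (__reg_k_0..__reg_k_4, vertical extent 2*__halo1 + 1). Threads outside
 * a stage's valid band simply forward the unmodified value (out2 = reg),
 * and __STORE writes a fully updated row that trails the most recent
 * __LOAD by 12 (= 6 steps * __halo1) rows.
 */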
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_0);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_0);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_0);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_0);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(2, __reg_6_2);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(3, __reg_6_3);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(4, __reg_6_4);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(5, __reg_6_0);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(6, __reg_6_1);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(7, __reg_6_2);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(8, __reg_6_3);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(9, __reg_6_4);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(10, __reg_6_0);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(11, __reg_6_1);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(12, __reg_6_2);
}
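/* Blocks with __c1Id != 0 start inside the grid, so the prologue below
   seeds only stage 1 from the first rows (none of the clamped boundary
   __CALC2..__CALC6 seeding used in the branch above) and issues a single
   __STORE(12, ...) once all six stages are primed. */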
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(12, __reg_6_2);
}
__a_sb = __a_sb_double + __blockSize * 0;
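/* Reset the shared-memory double buffer to its first half; __DB_SWITCH
   flips halves on every __CALCSETUP and each read follows its own write
   within the same half, so the starting half is arbitrary. The reset
   presumably just realigns both prologue paths before the main loop. */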
if (__c1Id == __side1Num - 1)
{
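/* Last block along c1: stream rows through the pipeline while at least 7
   rows of the tile remain, then flush with one of the clamped epilogues
   below (between 2 and 6 rows are left at loop exit). */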
for (__h = 25; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 12, __reg_6_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 12, __reg_6_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 12, __reg_6_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 12, __reg_6_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 12, __reg_6_2);
__h++;
}
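/* Epilogue dispatch: the "if (0) {} else if ..." ladder matches the exact
   number of rows remaining. In the clamped __CALC calls, repeating one
   register for several output slots effectively discards the partial sums
   that would belong to rows past the bottom boundary, while raw loads are
   stashed (e.g. __reg_1_0 = __reg_0) so the later stages can keep draining
   after the loads stop. */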
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 12, __reg_6_3);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 11, __reg_6_4);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 10, __reg_6_0);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 9, __reg_6_1);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 8, __reg_6_2);
__reg_3_0 = __reg_2_0;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 7, __reg_6_3);
__reg_3_1 = __reg_2_1;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 6, __reg_6_4);
__reg_4_0 = __reg_3_0;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 5, __reg_6_0);
__reg_4_1 = __reg_3_1;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 4, __reg_6_1);
__reg_5_0 = __reg_4_0;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_3, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 3, __reg_6_2);
__reg_5_1 = __reg_4_1;
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 2, __reg_6_3);
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_3, __reg_6_4, __reg_5_1);
__STORE(__h - 1, __reg_6_4);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 12, __reg_6_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 11, __reg_6_4);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 10, __reg_6_0);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 9, __reg_6_1);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 8, __reg_6_2);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 7, __reg_6_3);
__reg_3_1 = __reg_2_1;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 6, __reg_6_4);
__reg_3_2 = __reg_2_2;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 5, __reg_6_0);
__reg_4_1 = __reg_3_1;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 4, __reg_6_1);
__reg_4_2 = __reg_3_2;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 3, __reg_6_2);
__reg_5_1 = __reg_4_1;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_4, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 2, __reg_6_3);
__reg_5_2 = __reg_4_2;
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 1, __reg_6_4);
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_4, __reg_6_0, __reg_5_2);
__STORE(__h + 0, __reg_6_0);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 12, __reg_6_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 11, __reg_6_4);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 10, __reg_6_0);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 9, __reg_6_1);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 8, __reg_6_2);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 7, __reg_6_3);
__reg_2_3 = __reg_1_3;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 6, __reg_6_4);
__reg_3_2 = __reg_2_2;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 5, __reg_6_0);
__reg_3_3 = __reg_2_3;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 4, __reg_6_1);
__reg_4_2 = __reg_3_2;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 3, __reg_6_2);
__reg_4_3 = __reg_3_3;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 2, __reg_6_3);
__reg_5_2 = __reg_4_2;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_0, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 1, __reg_6_4);
__reg_5_3 = __reg_4_3;
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h + 0, __reg_6_0);
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_0, __reg_6_1, __reg_5_3);
__STORE(__h + 1, __reg_6_1);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 12, __reg_6_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 11, __reg_6_4);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 10, __reg_6_0);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 9, __reg_6_1);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 8, __reg_6_2);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 7, __reg_6_3);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 6, __reg_6_4);
__reg_2_4 = __reg_1_4;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 5, __reg_6_0);
__reg_3_3 = __reg_2_3;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 4, __reg_6_1);
__reg_3_4 = __reg_2_4;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 3, __reg_6_2);
__reg_4_3 = __reg_3_3;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 2, __reg_6_3);
__reg_4_4 = __reg_3_4;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 1, __reg_6_4);
__reg_5_3 = __reg_4_3;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_1, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h + 0, __reg_6_0);
__reg_5_4 = __reg_4_4;
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h + 1, __reg_6_1);
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_1, __reg_6_2, __reg_5_4);
__STORE(__h + 2, __reg_6_2);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 12, __reg_6_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 11, __reg_6_4);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 10, __reg_6_0);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 9, __reg_6_1);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 8, __reg_6_2);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 7, __reg_6_3);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 6, __reg_6_4);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 5, __reg_6_0);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 4, __reg_6_1);
__reg_3_4 = __reg_2_4;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 3, __reg_6_2);
__reg_3_0 = __reg_2_0;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 2, __reg_6_3);
__reg_4_4 = __reg_3_4;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 1, __reg_6_4);
__reg_4_0 = __reg_3_0;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h + 0, __reg_6_0);
__reg_5_4 = __reg_4_4;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_2, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h + 1, __reg_6_1);
__reg_5_0 = __reg_4_0;
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h + 2, __reg_6_2);
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_2, __reg_6_3, __reg_5_0);
__STORE(__h + 3, __reg_6_3);
}
}
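/* Blocks that are not last along c1: overlapped tiling means neighboring
   blocks recompute the shared boundary region, so these blocks only
   stream and need no clamped epilogue. */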
else
{
for (__h = 25; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 12, __reg_6_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 12, __reg_6_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 12, __reg_6_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 12, __reg_6_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 12, __reg_6_2);
__h++;
}
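/* Drain the rows left over from the unrolled loop one at a time; each
   guard returns as soon as the overlapped tile height (__side1LenOl) is
   exhausted, so normally only a few of these tail steps execute. */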
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 12, __reg_6_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 12, __reg_6_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 12, __reg_6_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 12, __reg_6_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 12, __reg_6_2);
__h++;
}
}
__global__ void kernel0_5(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
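/* Thread/block mapping: one thread per c2 column. blockIdx.x enumerates
   (c1 tile, c2 tile) pairs; __c2 is shifted left by __OlLen2 so each tile
   covers its halo columns as well. */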
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_2_3;
float __reg_2_4;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_3_3;
float __reg_3_4;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_4_3;
float __reg_4_4;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_5_3;
float __reg_5_4;
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __storeValid = __writeValid5;
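/* Each fused step consumes __halo2 = 2 columns per side of the overlapped
   tile, so __writeValidK requires the thread to sit at least 2*K columns
   inside it; only threads passing all five tests (__writeValid5) store
   results. */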
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (0.03125f * __SBREF(__a_sb, -2) + 0.03126f * __SBREF(__a_sb, -1) + 0.03127f * __REGREF(__a, 0) + 0.03128f * __SBREF(__a_sb, 1) + 0.03129f * __SBREF(__a_sb, 2)); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (0.03130f * __SBREF(__a_sb, -2) + 0.03131f * __SBREF(__a_sb, -1) + 0.03132f * __REGREF(__a, 0) + 0.03133f * __SBREF(__a_sb, 1) + 0.03134f * __SBREF(__a_sb, 2)); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = (0.03135f * __SBREF(__a_sb, -2) + 0.03136f * __SBREF(__a_sb, -1) + 0.24712f * __REGREF(__a, 0) + 0.03138f * __SBREF(__a_sb, 1) + 0.03139f * __SBREF(__a_sb, 2)); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (0.03140f * __SBREF(__a_sb, -2) + 0.03141f * __SBREF(__a_sb, -1) + 0.03142f * __REGREF(__a, 0) + 0.03143f * __SBREF(__a_sb, 1) + 0.03144f * __SBREF(__a_sb, 2)); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { float etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = (0.03145f * __SBREF(__a_sb, -2) + 0.03146f * __SBREF(__a_sb, -1) + 0.03147f * __REGREF(__a, 0) + 0.03148f * __SBREF(__a_sb, 1) + 0.03149f * __SBREF(__a_sb, 2)); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { float etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC4(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC5(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
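// First tile along c1: warm all five fused-timestep stages against the top
// boundary, then begin storing from output row 2.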
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_0);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_0);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(2, __reg_5_2);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(3, __reg_5_3);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(4, __reg_5_4);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(5, __reg_5_0);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(6, __reg_5_1);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(7, __reg_5_2);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(8, __reg_5_3);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(9, __reg_5_4);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(10, __reg_5_0);
}
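// Tiles that do not touch the top boundary: replay the prologue over the
// overlapped input rows and emit only output row 10, the depth of the
// 5-stage pipeline (2-row halo x 5 fused timesteps of lag between load
// and store).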
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(10, __reg_5_0);
}
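// The prologue toggled the shared-memory double buffer an odd number of
// times, so streaming resumes on its second half.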
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
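// Last tile along c1: stream rows through the pipeline in a 5-way unrolled
// loop, stopping 2..6 rows short of the tile end so the epilogue below can
// drain the stages.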
for (__h = 21; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 10, __reg_5_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 10, __reg_5_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 10, __reg_5_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 10, __reg_5_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 10, __reg_5_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
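// Drain the pipeline, dispatching on how many input rows remain (__h + 2 ..
// __h + 6 == tile end). Repeating a register in a __CALCk argument list
// appears to discard the partial sums that would land beyond the last row.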
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 10, __reg_5_1);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 9, __reg_5_2);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 8, __reg_5_3);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 7, __reg_5_4);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 6, __reg_5_0);
__reg_3_1 = __reg_2_1;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 5, __reg_5_1);
__reg_3_2 = __reg_2_2;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 4, __reg_5_2);
__reg_4_1 = __reg_3_1;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 3, __reg_5_3);
__reg_4_2 = __reg_3_2;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 2, __reg_5_4);
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_4, __reg_5_0, __reg_4_2);
__STORE(__h - 1, __reg_5_0);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 10, __reg_5_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 9, __reg_5_2);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 8, __reg_5_3);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 7, __reg_5_4);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 6, __reg_5_0);
__reg_2_3 = __reg_1_3;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 5, __reg_5_1);
__reg_3_2 = __reg_2_2;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 4, __reg_5_2);
__reg_3_3 = __reg_2_3;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 3, __reg_5_3);
__reg_4_2 = __reg_3_2;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 2, __reg_5_4);
__reg_4_3 = __reg_3_3;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 1, __reg_5_0);
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_0, __reg_5_1, __reg_4_3);
__STORE(__h + 0, __reg_5_1);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 10, __reg_5_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 9, __reg_5_2);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 8, __reg_5_3);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 7, __reg_5_4);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 6, __reg_5_0);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 5, __reg_5_1);
__reg_2_4 = __reg_1_4;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 4, __reg_5_2);
__reg_3_3 = __reg_2_3;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 3, __reg_5_3);
__reg_3_4 = __reg_2_4;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 2, __reg_5_4);
__reg_4_3 = __reg_3_3;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 1, __reg_5_0);
__reg_4_4 = __reg_3_4;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h + 0, __reg_5_1);
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_1, __reg_5_2, __reg_4_4);
__STORE(__h + 1, __reg_5_2);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 10, __reg_5_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 9, __reg_5_2);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 8, __reg_5_3);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 7, __reg_5_4);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 6, __reg_5_0);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 5, __reg_5_1);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 4, __reg_5_2);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 3, __reg_5_3);
__reg_3_4 = __reg_2_4;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 2, __reg_5_4);
__reg_3_0 = __reg_2_0;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 1, __reg_5_0);
__reg_4_4 = __reg_3_4;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h + 0, __reg_5_1);
__reg_4_0 = __reg_3_0;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h + 1, __reg_5_2);
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_2, __reg_5_3, __reg_4_0);
__STORE(__h + 2, __reg_5_3);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 10, __reg_5_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 9, __reg_5_2);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 8, __reg_5_3);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 7, __reg_5_4);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 6, __reg_5_0);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 5, __reg_5_1);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 4, __reg_5_2);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 3, __reg_5_3);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 2, __reg_5_4);
__reg_3_0 = __reg_2_0;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 1, __reg_5_0);
__reg_3_1 = __reg_2_1;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h + 0, __reg_5_1);
__reg_4_0 = __reg_3_0;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h + 1, __reg_5_2);
__reg_4_1 = __reg_3_1;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h + 2, __reg_5_3);
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_3, __reg_5_4, __reg_4_1);
__STORE(__h + 3, __reg_5_4);
}
}
else
{
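// Interior tiles: steady-state streaming in the same 5-way unrolled loop,
// then a per-row tail that returns as soon as the overlapped tile height
// __side1LenOl is exhausted.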
for (__h = 21; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 10, __reg_5_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 10, __reg_5_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 10, __reg_5_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 10, __reg_5_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 10, __reg_5_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 10, __reg_5_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 10, __reg_5_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 10, __reg_5_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 10, __reg_5_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 10, __reg_5_0);
__h++;
}
}
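// kernel0_4: the same register-streaming scheme, fusing 4 timesteps per
// sweep (__side0Len = 4), so the pipeline has four __CALC stages and stores
// lag loads by 8 rows instead of 10.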
__global__ void kernel0_4(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
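// One thread per c2 column of a 512-wide overlapped tile (496 + 2 * 8 halo
// columns); blockIdx.x enumerates (c1 tile, c2 tile) pairs.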
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_2_3;
float __reg_2_4;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_3_3;
float __reg_3_4;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_4_3;
float __reg_4_4;
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __storeValid = __writeValid4;
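// Each fused timestep shrinks the valid column range by one halo width;
// only columns still valid after all four stages (__writeValid4) may store.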
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
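// Same 25-point box stencil as above, split by input row; the weights again
// sum to 1.0f.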
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __CALCEXPR_3(out, a) do { float etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __CALCEXPR_4(out, a) do { float etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC4(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
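// First tile along c1: warm the four pipeline stages on input rows 0..16
// and store output rows 2..8.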
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(2, __reg_4_2);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(3, __reg_4_3);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(4, __reg_4_4);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(5, __reg_4_0);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(6, __reg_4_1);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(7, __reg_4_2);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(8, __reg_4_3);
}
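// Interior tiles: overlapped prologue with a single store at the pipeline
// depth (output row 8).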
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(8, __reg_4_3);
}
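// Here the prologue toggled the double buffer an even number of times, so
// streaming resumes on its first half.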
__a_sb = __a_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
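// Last tile along c1: 5-way unrolled streaming with stores lagging loads by
// 8 rows.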
for (__h = 17; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 8, __reg_4_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 8, __reg_4_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 8, __reg_4_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h - 8, __reg_4_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h - 8, __reg_4_3);
__h++;
}
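// Drain the pipeline, dispatching on how many input rows remain before the
// tile end.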
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 8, __reg_4_4);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 7, __reg_4_0);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 6, __reg_4_1);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h - 5, __reg_4_2);
__reg_2_3 = __reg_1_3;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h - 4, __reg_4_3);
__reg_3_2 = __reg_2_2;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 3, __reg_4_4);
__reg_3_3 = __reg_2_3;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 2, __reg_4_0);
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_3);
__STORE(__h - 1, __reg_4_1);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 8, __reg_4_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 7, __reg_4_0);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 6, __reg_4_1);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h - 5, __reg_4_2);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h - 4, __reg_4_3);
__reg_2_4 = __reg_1_4;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 3, __reg_4_4);
__reg_3_3 = __reg_2_3;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 2, __reg_4_0);
__reg_3_4 = __reg_2_4;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 1, __reg_4_1);
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_4);
__STORE(__h + 0, __reg_4_2);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 8, __reg_4_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 7, __reg_4_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 6, __reg_4_1);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h - 5, __reg_4_2);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h - 4, __reg_4_3);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 3, __reg_4_4);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 2, __reg_4_0);
__reg_3_4 = __reg_2_4;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 1, __reg_4_1);
__reg_3_0 = __reg_2_0;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h + 0, __reg_4_2);
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_0);
__STORE(__h + 1, __reg_4_3);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 8, __reg_4_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 7, __reg_4_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 6, __reg_4_1);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h - 5, __reg_4_2);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h - 4, __reg_4_3);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 3, __reg_4_4);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 2, __reg_4_0);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 1, __reg_4_1);
__reg_3_0 = __reg_2_0;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h + 0, __reg_4_2);
__reg_3_1 = __reg_2_1;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h + 1, __reg_4_3);
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_1);
__STORE(__h + 2, __reg_4_4);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 8, __reg_4_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 7, __reg_4_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 6, __reg_4_1);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h - 5, __reg_4_2);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h - 4, __reg_4_3);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 3, __reg_4_4);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 2, __reg_4_0);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 1, __reg_4_1);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h + 0, __reg_4_2);
__reg_3_1 = __reg_2_1;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h + 1, __reg_4_3);
__reg_3_2 = __reg_2_2;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h + 2, __reg_4_4);
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_0, __reg_3_2);
__STORE(__h + 3, __reg_4_0);
}
}
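// Interior c1 tiles: no bottom boundary to special-case, so run the full
// unrolled loop over the overlapped tile and fall through to the guarded
// drain.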
else
{
for (__h = 17; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 8, __reg_4_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 8, __reg_4_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 8, __reg_4_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h - 8, __reg_4_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h - 8, __reg_4_3);
__h++;
}
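// Drain: finish the last few rows one at a time, returning as soon as __h
// reaches the overlapped tile height __side1LenOl.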
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 8, __reg_4_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 8, __reg_4_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 8, __reg_4_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h - 8, __reg_4_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h - 8, __reg_4_3);
__h++;
}
}
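// kernel0_3: the same 5x5 box stencil, fusing __side0Len = 3 time steps per
// launch via three register-rotation pipeline stages (__CALC1..__CALC3).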
__global__ void kernel0_3(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_2_3;
float __reg_2_4;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_3_3;
float __reg_3_4;
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __storeValid = __writeValid3;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
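// Generated macro set: __LOAD pulls one row of the current time plane into
// a register, __CALCEXPR_0..4 accumulate the five rows of a 5x5 box stencil
// (weights 0.03125f..0.03149f, with the centre point weighted 0.24712f),
// and __STORE commits a finished row to the other time plane. __DB_SWITCH
// flips the shared-memory double buffer serving the horizontal (c2)
// neighbours.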
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (0.03125f * __SBREF(__a_sb, -2)) + (0.03126f * __SBREF(__a_sb, -1)) + (0.03127f * __REGREF(__a, 0)) + (0.03128f * __SBREF(__a_sb, 1)) + (0.03129f * __SBREF(__a_sb, 2)); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (0.03130f * __SBREF(__a_sb, -2)) + (0.03131f * __SBREF(__a_sb, -1)) + (0.03132f * __REGREF(__a, 0)) + (0.03133f * __SBREF(__a_sb, 1)) + (0.03134f * __SBREF(__a_sb, 2)); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = (0.03135f * __SBREF(__a_sb, -2)) + (0.03136f * __SBREF(__a_sb, -1)) + (0.24712f * __REGREF(__a, 0)) + (0.03138f * __SBREF(__a_sb, 1)) + (0.03139f * __SBREF(__a_sb, 2)); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (0.03140f * __SBREF(__a_sb, -2)) + (0.03141f * __SBREF(__a_sb, -1)) + (0.03142f * __REGREF(__a, 0)) + (0.03143f * __SBREF(__a_sb, 1)) + (0.03144f * __SBREF(__a_sb, 2)); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { float etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = (0.03145f * __SBREF(__a_sb, -2)) + (0.03146f * __SBREF(__a_sb, -1)) + (0.03147f * __REGREF(__a, 0)) + (0.03148f * __SBREF(__a_sb, 1)) + (0.03149f * __SBREF(__a_sb, 2)); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { float etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
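// Warm-up: the first c1 tile enters with clamped top-boundary arguments and
// already stores rows 2..6; all other tiles prime the pipeline from their
// halo rows and emit their first store at row 6.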
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(2, __reg_3_2);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(3, __reg_3_3);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(4, __reg_3_4);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(5, __reg_3_0);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(6, __reg_3_1);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(6, __reg_3_1);
}
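// Each iteration of the steady-state loop below performs 15 __CALC calls
// (an odd number of buffer flips), hence the re-anchor to buffer 1 here and
// the explicit __DB_SWITCH(); __syncthreads(); at the end of each pass.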
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 13; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 6, __reg_3_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h - 6, __reg_3_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h - 6, __reg_3_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h - 6, __reg_3_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(__h - 6, __reg_3_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
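// Bottom-boundary tail for the last c1 tile, one branch per residual row
// count (__h + 2 .. __h + 6 rows remaining).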
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 6, __reg_3_2);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h - 5, __reg_3_3);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h - 4, __reg_3_4);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h - 3, __reg_3_0);
__reg_2_4 = __reg_1_4;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(__h - 2, __reg_3_1);
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4);
__STORE(__h - 1, __reg_3_2);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 6, __reg_3_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h - 5, __reg_3_3);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h - 4, __reg_3_4);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h - 3, __reg_3_0);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(__h - 2, __reg_3_1);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 1, __reg_3_2);
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0);
__STORE(__h + 0, __reg_3_3);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 6, __reg_3_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h - 5, __reg_3_3);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h - 4, __reg_3_4);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h - 3, __reg_3_0);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(__h - 2, __reg_3_1);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 1, __reg_3_2);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h + 0, __reg_3_3);
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1);
__STORE(__h + 1, __reg_3_4);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 6, __reg_3_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h - 5, __reg_3_3);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h - 4, __reg_3_4);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h - 3, __reg_3_0);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(__h - 2, __reg_3_1);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 1, __reg_3_2);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h + 0, __reg_3_3);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h + 1, __reg_3_4);
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2);
__STORE(__h + 2, __reg_3_0);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 6, __reg_3_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h - 5, __reg_3_3);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h - 4, __reg_3_4);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h - 3, __reg_3_0);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(__h - 2, __reg_3_1);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 1, __reg_3_2);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h + 0, __reg_3_3);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h + 1, __reg_3_4);
__reg_2_3 = __reg_1_3;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h + 2, __reg_3_0);
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3);
__STORE(__h + 3, __reg_3_1);
}
}
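// Interior c1 tiles of kernel0_3: the same steady-state loop, then the
// guarded drain.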
else
{
for (__h = 13; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 6, __reg_3_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h - 6, __reg_3_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h - 6, __reg_3_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h - 6, __reg_3_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(__h - 6, __reg_3_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 6, __reg_3_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h - 6, __reg_3_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h - 6, __reg_3_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h - 6, __reg_3_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(__h - 6, __reg_3_1);
__h++;
}
}
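// kernel0_2: identical stencil with __side0Len = 2 fused time steps; the
// host driver presumably uses it to mop up leftover time steps (an
// assumption; the dispatch logic is not shown here).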
__global__ void kernel0_2(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_2_3;
float __reg_2_4;
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __storeValid = __writeValid2;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
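// The same generated macro set as kernel0_3, re-emitted verbatim; only the
// pipeline depth (__CALC1/__CALC2, __storeValid = __writeValid2) differs.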
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (0.03125f * __SBREF(__a_sb, -2)) + (0.03126f * __SBREF(__a_sb, -1)) + (0.03127f * __REGREF(__a, 0)) + (0.03128f * __SBREF(__a_sb, 1)) + (0.03129f * __SBREF(__a_sb, 2)); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (0.03130f * __SBREF(__a_sb, -2)) + (0.03131f * __SBREF(__a_sb, -1)) + (0.03132f * __REGREF(__a, 0)) + (0.03133f * __SBREF(__a_sb, 1)) + (0.03134f * __SBREF(__a_sb, 2)); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = (0.03135f * __SBREF(__a_sb, -2)) + (0.03136f * __SBREF(__a_sb, -1)) + (0.24712f * __REGREF(__a, 0)) + (0.03138f * __SBREF(__a_sb, 1)) + (0.03139f * __SBREF(__a_sb, 2)); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (0.03140f * __SBREF(__a_sb, -2)) + (0.03141f * __SBREF(__a_sb, -1)) + (0.03142f * __REGREF(__a, 0)) + (0.03143f * __SBREF(__a_sb, 1)) + (0.03144f * __SBREF(__a_sb, 2)); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { float etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = (0.03145f * __SBREF(__a_sb, -2)) + (0.03146f * __SBREF(__a_sb, -1)) + (0.03147f * __REGREF(__a, 0)) + (0.03148f * __SBREF(__a_sb, 1)) + (0.03149f * __SBREF(__a_sb, 2)); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { float etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
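// A minimal host-side launch sketch (not from the source; grid and block
// shapes are inferred from __side1Num, __side2Num and __side2LenOl = 512
// defined above):
//   dim3 grid(((dimsize - 4 + 255) / 256) * ((dimsize - 4 + 503) / 504));
//   kernel0_2<<<grid, 512>>>(A, dimsize, timestep, c0);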
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(2, __reg_2_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__STORE(3, __reg_2_3);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__STORE(4, __reg_2_4);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__STORE(4, __reg_2_4);
}
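// Two __CALC calls per load make an even number of buffer flips per
// iteration, so the loops below need no explicit __DB_SWITCH() at the tail.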
__a_sb = __a_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h - 4, __reg_2_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h - 4, __reg_2_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(__h - 4, __reg_2_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__STORE(__h - 4, __reg_2_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__STORE(__h - 4, __reg_2_4);
__h++;
}
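// Bottom-boundary tail for the two-stage pipeline, again one branch per
// residual row count.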
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h - 4, __reg_2_0);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h - 3, __reg_2_1);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(__h - 2, __reg_2_2);
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0);
__STORE(__h - 1, __reg_2_3);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h - 4, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h - 3, __reg_2_1);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(__h - 2, __reg_2_2);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0);
__STORE(__h - 1, __reg_2_3);
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1);
__STORE(__h + 0, __reg_2_4);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h - 4, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h - 3, __reg_2_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(__h - 2, __reg_2_2);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__STORE(__h - 1, __reg_2_3);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1);
__STORE(__h + 0, __reg_2_4);
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2);
__STORE(__h + 1, __reg_2_0);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h - 4, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h - 3, __reg_2_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(__h - 2, __reg_2_2);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__STORE(__h - 1, __reg_2_3);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__STORE(__h + 0, __reg_2_4);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h + 1, __reg_2_0);
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3);
__STORE(__h + 2, __reg_2_1);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h - 4, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h - 3, __reg_2_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(__h - 2, __reg_2_2);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__STORE(__h - 1, __reg_2_3);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__STORE(__h + 0, __reg_2_4);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h + 1, __reg_2_0);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h + 2, __reg_2_1);
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4);
__STORE(__h + 3, __reg_2_2);
}
}
else
{
for (__h = 9; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h - 4, __reg_2_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h - 4, __reg_2_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(__h - 4, __reg_2_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__STORE(__h - 4, __reg_2_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__STORE(__h - 4, __reg_2_4);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h - 4, __reg_2_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h - 4, __reg_2_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(__h - 4, __reg_2_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__STORE(__h - 4, __reg_2_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__STORE(__h - 4, __reg_2_4);
__h++;
}
}
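// kernel0_1: single-fused-step variant of the same box2d2r stencil
// (__side0Len == 1, one time step per launch). Each thread block streams a
// 256-row x 508-column tile along c1, holding a 5-row input window in the
// rotating registers __reg_1_0..__reg_1_4.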
__global__ void kernel0_1(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __storeValid = __writeValid1;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { float etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { float etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
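// The 5x5 box stencil is split by row: __CALCEXPR_0 starts a fresh partial
// sum for the output two rows ahead of the current input row, and
// __CALCEXPR_1..4 accumulate that same input row into the four older partial
// sums. Column taps (-2..+2) come from the shared-memory copy of the row via
// __SBREF, so one global load per row serves all 25 stencil terms.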
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
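// Prologue: rows 0..4 prime the 5-row register window; the first output row
// (__halo1 == 2) is stored once row 4 has been folded in. For this
// single-step kernel the __c1Id == 0 and interior prologues are identical.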
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(2, __reg_1_2);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(2, __reg_1_2);
}
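// Normalize the shared-memory double-buffer pointer so every block enters
// the streaming loop in the same buffer phase.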
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 2, __reg_1_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 2, __reg_1_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 2, __reg_1_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h - 2, __reg_1_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
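// Bottom-boundary tail for the last block along c1: 2..6 rows remain after
// the 5x-unrolled loop. The dead "if (0) {}" lets every case below be an
// else-if; the repeated register arguments clamp the boundary by replaying
// edge rows in place of the missing halo rows.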
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__STORE(__h - 1, __reg_1_4);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 1, __reg_1_4);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 1, __reg_1_4);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 1, __reg_1_4);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 1, __reg_1_4);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__STORE(__h + 3, __reg_1_3);
}
}
else
{
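// Steady state for blocks that are not last along c1: unrolled 5x so the
// rotating register names return to their starting phase each iteration; the
// trailing __DB_SWITCH keeps the shared-memory double buffer in phase across
// iterations (five loads flip it an odd number of times).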
for (__h = 5; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 2, __reg_1_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 2, __reg_1_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 2, __reg_1_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h - 2, __reg_1_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
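// Drain: up to five rows of the overlapped range remain; process them one at
// a time and return as soon as __h reaches __side1LenOl.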
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 2, __reg_1_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 2, __reg_1_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 2, __reg_1_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h - 2, __reg_1_2);
__h++;
}
}
|
3bf2717ffda2186307951f1b85b4c0506401c6c5.cu
|
#include "box2d2r-512-10-256_kernel.hu"
__device__ float __sbref_wrap(float *sb, size_t index) { return sb[index]; }
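// __sbref_wrap above is a trivial shared-memory indexer kept as a function
// so the __SBREF macro stays a single expression.
// kernel0_10: 10 fused time steps per launch. Ten register pipelines
// (__reg_1_* .. __reg_10_*) each hold a 5-row window; stage k+1 consumes the
// completed rows of stage k, and only stage 10 writes to global memory.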
__global__ void kernel0_10(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 10;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 472;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_2_3;
float __reg_2_4;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_3_3;
float __reg_3_4;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_4_3;
float __reg_4_4;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_5_3;
float __reg_5_4;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
float __reg_6_3;
float __reg_6_4;
float __reg_7_0;
float __reg_7_1;
float __reg_7_2;
float __reg_7_3;
float __reg_7_4;
float __reg_8_0;
float __reg_8_1;
float __reg_8_2;
float __reg_8_3;
float __reg_8_4;
float __reg_9_0;
float __reg_9_1;
float __reg_9_2;
float __reg_9_3;
float __reg_9_4;
float __reg_10_0;
float __reg_10_1;
float __reg_10_2;
float __reg_10_3;
float __reg_10_4;
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10);
const AN5D_TYPE __storeValid = __writeValid10;
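// Each fused step consumes halo2 == 2 columns on each side, so stage k is
// valid only for local columns in [2k, __side2LenOl - 2k); __storeValid
// restricts global stores to the innermost (stage-10) region.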
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { float etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { float etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC4(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC5(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC6(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC7(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid7) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC8(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid8) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC9(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid9) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC10(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid10) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
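// Top-boundary priming for the first block along c1: rows 0 and 1 are fed to
// all ten stages at once (clamped), then each subsequent stage comes online
// two rows after the previous one (stage 2 at row 4, ..., stage 10 at row
// 20); the first global store (output row 2) happens after row 22 is loaded.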
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_0);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_0);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_0);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_0);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_0);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_0);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_0);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_0);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_0);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_0);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_0);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_0);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(2, __reg_10_2);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(3, __reg_10_3);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(4, __reg_10_4);
__LOAD(__reg_0, 25);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(5, __reg_10_0);
__LOAD(__reg_0, 26);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(6, __reg_10_1);
__LOAD(__reg_0, 27);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(7, __reg_10_2);
__LOAD(__reg_0, 28);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(8, __reg_10_3);
__LOAD(__reg_0, 29);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(9, __reg_10_4);
__LOAD(__reg_0, 30);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(10, __reg_10_0);
__LOAD(__reg_0, 31);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(11, __reg_10_1);
__LOAD(__reg_0, 32);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(12, __reg_10_2);
__LOAD(__reg_0, 33);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(13, __reg_10_3);
__LOAD(__reg_0, 34);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(14, __reg_10_4);
__LOAD(__reg_0, 35);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(15, __reg_10_0);
__LOAD(__reg_0, 36);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(16, __reg_10_1);
__LOAD(__reg_0, 37);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(17, __reg_10_2);
__LOAD(__reg_0, 38);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(18, __reg_10_3);
__LOAD(__reg_0, 39);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(19, __reg_10_4);
__LOAD(__reg_0, 40);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(20, __reg_10_0);
}
else
{
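// Interior / last blocks along c1: no top clamp, so each stage comes online
// four rows after the previous one (stage k starts once stage k-1 has
// completed its first row); priming consumes the 20-row leading overlap
// (__OlLen1 == __halo1 * __side0Len).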
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__LOAD(__reg_0, 25);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__LOAD(__reg_0, 26);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__LOAD(__reg_0, 27);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__LOAD(__reg_0, 28);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__LOAD(__reg_0, 29);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__LOAD(__reg_0, 30);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__LOAD(__reg_0, 31);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__LOAD(__reg_0, 32);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__LOAD(__reg_0, 33);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__LOAD(__reg_0, 34);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__LOAD(__reg_0, 35);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__LOAD(__reg_0, 36);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__LOAD(__reg_0, 37);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__LOAD(__reg_0, 38);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__LOAD(__reg_0, 39);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__LOAD(__reg_0, 40);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(20, __reg_10_0);
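// Warm-up of the register pipeline ends here: __STORE(20, ...) writes the
// first finished output row. The offset of 20 presumably reflects the 10
// __CALCk stages consuming a halo of 2 rows each before a result is valid.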
}
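// Rewind the shared-memory row buffer to the first half of its double
// buffer (__a_sb_double is assumed to be set up earlier in this kernel)
// before streaming the remaining rows of this thread block's tile.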
__a_sb = __a_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
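// Last tile along dimension 1: steady-state sweep. Each pass loads one row,
// pushes it through all 10 stages, and stores one output row 20 rows back.
// The body is unrolled 5x so the cyclic register names (__reg_k_0..__reg_k_4)
// return to the same alignment at the top of every iteration.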
for (__h = 41; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 20, __reg_10_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 20, __reg_10_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 20, __reg_10_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 20, __reg_10_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 20, __reg_10_0);
__h++;
}
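// Steady-state loop done; between 2 and 6 input rows remain. The chain of
// `else if` cases below picks the drain sequence matching the exact
// remainder and flushes the partially filled pipeline stages.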
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
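// Two more input rows to load. From here on the stencil would read past the
// tile boundary, so the __CALCk calls duplicate register arguments (e.g.
// __reg_1_3 passed three times below), which appears to clamp the
// out-of-range taps to the boundary row.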
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 20, __reg_10_1);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 19, __reg_10_2);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 18, __reg_10_3);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 17, __reg_10_4);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 16, __reg_10_0);
__reg_3_1 = __reg_2_1;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 15, __reg_10_1);
__reg_3_2 = __reg_2_2;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 14, __reg_10_2);
__reg_4_1 = __reg_3_1;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 13, __reg_10_3);
__reg_4_2 = __reg_3_2;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 12, __reg_10_4);
__reg_5_1 = __reg_4_1;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_4, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 11, __reg_10_0);
__reg_5_2 = __reg_4_2;
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 10, __reg_10_1);
__reg_6_1 = __reg_5_1;
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_4, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 9, __reg_10_2);
__reg_6_2 = __reg_5_2;
__CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 8, __reg_10_3);
__reg_7_1 = __reg_6_1;
__CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_4, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 7, __reg_10_4);
__reg_7_2 = __reg_6_2;
__CALC8(__reg_8_3, __reg_8_3, __reg_8_3, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 6, __reg_10_0);
__reg_8_1 = __reg_7_1;
__CALC8(__reg_8_4, __reg_8_4, __reg_8_4, __reg_8_4, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 5, __reg_10_1);
__reg_8_2 = __reg_7_2;
__CALC9(__reg_9_3, __reg_9_3, __reg_9_3, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 4, __reg_10_2);
__reg_9_1 = __reg_8_1;
__CALC9(__reg_9_4, __reg_9_4, __reg_9_4, __reg_9_4, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 3, __reg_10_3);
__reg_9_2 = __reg_8_2;
__CALC10(__reg_10_3, __reg_10_3, __reg_10_3, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 2, __reg_10_4);
__CALC10(__reg_10_4, __reg_10_4, __reg_10_4, __reg_10_4, __reg_10_0, __reg_9_2);
__STORE(__h - 1, __reg_10_0);
}
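// Three more input rows to load before draining the pipeline.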
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 20, __reg_10_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 19, __reg_10_2);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 18, __reg_10_3);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 17, __reg_10_4);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 16, __reg_10_0);
__reg_2_3 = __reg_1_3;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 15, __reg_10_1);
__reg_3_2 = __reg_2_2;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 14, __reg_10_2);
__reg_3_3 = __reg_2_3;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 13, __reg_10_3);
__reg_4_2 = __reg_3_2;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 12, __reg_10_4);
__reg_4_3 = __reg_3_3;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 11, __reg_10_0);
__reg_5_2 = __reg_4_2;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_0, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 10, __reg_10_1);
__reg_5_3 = __reg_4_3;
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 9, __reg_10_2);
__reg_6_2 = __reg_5_2;
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_0, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 8, __reg_10_3);
__reg_6_3 = __reg_5_3;
__CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 7, __reg_10_4);
__reg_7_2 = __reg_6_2;
__CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_0, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 6, __reg_10_0);
__reg_7_3 = __reg_6_3;
__CALC8(__reg_8_4, __reg_8_4, __reg_8_4, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 5, __reg_10_1);
__reg_8_2 = __reg_7_2;
__CALC8(__reg_8_0, __reg_8_0, __reg_8_0, __reg_8_0, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 4, __reg_10_2);
__reg_8_3 = __reg_7_3;
__CALC9(__reg_9_4, __reg_9_4, __reg_9_4, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 3, __reg_10_3);
__reg_9_2 = __reg_8_2;
__CALC9(__reg_9_0, __reg_9_0, __reg_9_0, __reg_9_0, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 2, __reg_10_4);
__reg_9_3 = __reg_8_3;
__CALC10(__reg_10_4, __reg_10_4, __reg_10_4, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 1, __reg_10_0);
__CALC10(__reg_10_0, __reg_10_0, __reg_10_0, __reg_10_0, __reg_10_1, __reg_9_3);
__STORE(__h + 0, __reg_10_1);
}
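// Four more input rows to load before draining the pipeline.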
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 20, __reg_10_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 19, __reg_10_2);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 18, __reg_10_3);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 17, __reg_10_4);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 16, __reg_10_0);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 15, __reg_10_1);
__reg_2_4 = __reg_1_4;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 14, __reg_10_2);
__reg_3_3 = __reg_2_3;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 13, __reg_10_3);
__reg_3_4 = __reg_2_4;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 12, __reg_10_4);
__reg_4_3 = __reg_3_3;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 11, __reg_10_0);
__reg_4_4 = __reg_3_4;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 10, __reg_10_1);
__reg_5_3 = __reg_4_3;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_1, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 9, __reg_10_2);
__reg_5_4 = __reg_4_4;
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 8, __reg_10_3);
__reg_6_3 = __reg_5_3;
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_1, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 7, __reg_10_4);
__reg_6_4 = __reg_5_4;
__CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 6, __reg_10_0);
__reg_7_3 = __reg_6_3;
__CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_1, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 5, __reg_10_1);
__reg_7_4 = __reg_6_4;
__CALC8(__reg_8_0, __reg_8_0, __reg_8_0, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 4, __reg_10_2);
__reg_8_3 = __reg_7_3;
__CALC8(__reg_8_1, __reg_8_1, __reg_8_1, __reg_8_1, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 3, __reg_10_3);
__reg_8_4 = __reg_7_4;
__CALC9(__reg_9_0, __reg_9_0, __reg_9_0, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 2, __reg_10_4);
__reg_9_3 = __reg_8_3;
__CALC9(__reg_9_1, __reg_9_1, __reg_9_1, __reg_9_1, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 1, __reg_10_0);
__reg_9_4 = __reg_8_4;
__CALC10(__reg_10_0, __reg_10_0, __reg_10_0, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h + 0, __reg_10_1);
__CALC10(__reg_10_1, __reg_10_1, __reg_10_1, __reg_10_1, __reg_10_2, __reg_9_4);
__STORE(__h + 1, __reg_10_2);
}
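// Five more input rows to load before draining the pipeline.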
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 20, __reg_10_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 19, __reg_10_2);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 18, __reg_10_3);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 17, __reg_10_4);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 16, __reg_10_0);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 15, __reg_10_1);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 14, __reg_10_2);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 13, __reg_10_3);
__reg_3_4 = __reg_2_4;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 12, __reg_10_4);
__reg_3_0 = __reg_2_0;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 11, __reg_10_0);
__reg_4_4 = __reg_3_4;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 10, __reg_10_1);
__reg_4_0 = __reg_3_0;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 9, __reg_10_2);
__reg_5_4 = __reg_4_4;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_2, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 8, __reg_10_3);
__reg_5_0 = __reg_4_0;
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 7, __reg_10_4);
__reg_6_4 = __reg_5_4;
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_2, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 6, __reg_10_0);
__reg_6_0 = __reg_5_0;
__CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 5, __reg_10_1);
__reg_7_4 = __reg_6_4;
__CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_2, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 4, __reg_10_2);
__reg_7_0 = __reg_6_0;
__CALC8(__reg_8_1, __reg_8_1, __reg_8_1, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 3, __reg_10_3);
__reg_8_4 = __reg_7_4;
__CALC8(__reg_8_2, __reg_8_2, __reg_8_2, __reg_8_2, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 2, __reg_10_4);
__reg_8_0 = __reg_7_0;
__CALC9(__reg_9_1, __reg_9_1, __reg_9_1, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 1, __reg_10_0);
__reg_9_4 = __reg_8_4;
__CALC9(__reg_9_2, __reg_9_2, __reg_9_2, __reg_9_2, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h + 0, __reg_10_1);
__reg_9_0 = __reg_8_0;
__CALC10(__reg_10_1, __reg_10_1, __reg_10_1, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h + 1, __reg_10_2);
__CALC10(__reg_10_2, __reg_10_2, __reg_10_2, __reg_10_2, __reg_10_3, __reg_9_0);
__STORE(__h + 2, __reg_10_3);
}
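/* Drain variant taken when the block's remaining extent equals __h + 6:
 * load the last six rows (__h + 0 .. __h + 5), then flush all ten
 * pipeline stages with boundary-clamped __CALCk forms (note the repeated
 * first arguments), storing results through __h + 3. */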
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 20, __reg_10_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 19, __reg_10_2);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 18, __reg_10_3);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 17, __reg_10_4);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 16, __reg_10_0);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 15, __reg_10_1);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 14, __reg_10_2);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 13, __reg_10_3);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 12, __reg_10_4);
__reg_3_0 = __reg_2_0;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 11, __reg_10_0);
__reg_3_1 = __reg_2_1;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 10, __reg_10_1);
__reg_4_0 = __reg_3_0;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 9, __reg_10_2);
__reg_4_1 = __reg_3_1;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 8, __reg_10_3);
__reg_5_0 = __reg_4_0;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_3, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 7, __reg_10_4);
__reg_5_1 = __reg_4_1;
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 6, __reg_10_0);
__reg_6_0 = __reg_5_0;
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_3, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 5, __reg_10_1);
__reg_6_1 = __reg_5_1;
__CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 4, __reg_10_2);
__reg_7_0 = __reg_6_0;
__CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_3, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 3, __reg_10_3);
__reg_7_1 = __reg_6_1;
__CALC8(__reg_8_2, __reg_8_2, __reg_8_2, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 2, __reg_10_4);
__reg_8_0 = __reg_7_0;
__CALC8(__reg_8_3, __reg_8_3, __reg_8_3, __reg_8_3, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 1, __reg_10_0);
__reg_8_1 = __reg_7_1;
__CALC9(__reg_9_2, __reg_9_2, __reg_9_2, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h + 0, __reg_10_1);
__reg_9_0 = __reg_8_0;
__CALC9(__reg_9_3, __reg_9_3, __reg_9_3, __reg_9_3, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h + 1, __reg_10_2);
__reg_9_1 = __reg_8_1;
__CALC10(__reg_10_2, __reg_10_2, __reg_10_2, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h + 2, __reg_10_3);
__CALC10(__reg_10_3, __reg_10_3, __reg_10_3, __reg_10_3, __reg_10_4, __reg_9_1);
__STORE(__h + 3, __reg_10_4);
}
}
else
{
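/* Steady state (5-way unrolled): each iteration loads five new rows,
 * advances all ten pipeline stages, and stores five results trailing
 * twenty rows (ten fused steps x halo 2) behind the load front. */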
for (__h = 41; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 20, __reg_10_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 20, __reg_10_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 20, __reg_10_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 20, __reg_10_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 20, __reg_10_0);
__h++;
}
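/* Remainder drain: finish one row at a time, bailing out as soon as the
 * overlapped extent __side1LenOl is exhausted. */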
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 20, __reg_10_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 20, __reg_10_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 20, __reg_10_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 20, __reg_10_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 20, __reg_10_0);
__h++;
}
}
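/* kernel0_9: nine-step variant of the same generated 2D order-2 star
 * stencil (__side0Len = 9), so stores trail loads by 18 rows instead of
 * 20. The matching host driver is not shown here; a minimal launch
 * sketch, assuming the grid/block conventions implied by the index math
 * below (hypothetical, for orientation only):
 *
 *   AN5D_TYPE side1Num = (dimsize - 4 + 256 - 1) / 256;  // __side1Len
 *   AN5D_TYPE side2Num = (dimsize - 4 + 476 - 1) / 476;  // __side2Len
 *   dim3 grid(side1Num * side2Num, 1, 1);
 *   dim3 block(476 + 2 * (2 * 9), 1, 1);                 // __side2LenOl = 512
 *   kernel0_9<<<grid, block>>>(A, dimsize, timestep, c0);
 */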
__global__ void kernel0_9(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 9;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 476;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
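/* Pipeline registers: __reg_k_0 .. __reg_k_4 hold five consecutive rows
 * of the intermediate result after k fused time steps; values rotate
 * through these names as __h advances instead of being re-read from
 * global memory. */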
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_2_3;
float __reg_2_4;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_3_3;
float __reg_3_4;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_4_3;
float __reg_4_4;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_5_3;
float __reg_5_4;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
float __reg_6_3;
float __reg_6_4;
float __reg_7_0;
float __reg_7_1;
float __reg_7_2;
float __reg_7_3;
float __reg_7_4;
float __reg_8_0;
float __reg_8_1;
float __reg_8_2;
float __reg_8_3;
float __reg_8_4;
float __reg_9_0;
float __reg_9_1;
float __reg_9_2;
float __reg_9_3;
float __reg_9_4;
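/* One row of the current stage is staged in shared memory; the buffer is
 * double-sized so __DB_SWITCH can flip halves between __CALCSETUP calls. */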
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
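/* Every fused step consumes one halo width (__halo2 = 2) on each side of
 * the c2 tile, so the writable interior shrinks stage by stage; only
 * lanes passing __writeValid9 reach __STORE. */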
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __storeValid = __writeValid9;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
/* Helper macros shared by the five per-row stencil expressions. */
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
/* One __CALCEXPR_k per stencil row (c1 offset -2 .. +2): a five-tap
 * weighted sum along c2, left-associated. __CALCEXPR_0 initializes the
 * accumulator; __CALCEXPR_1 .. __CALCEXPR_4 add into it. */
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1))) + (0.03127f * (__REGREF(__a, 0))) + (0.03128f * (__SBREF(__a_sb, 1))) + (0.03129f * (__SBREF(__a_sb, 2))); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (0.03130f * (__SBREF(__a_sb, -2))) + (0.03131f * (__SBREF(__a_sb, -1))) + (0.03132f * (__REGREF(__a, 0))) + (0.03133f * (__SBREF(__a_sb, 1))) + (0.03134f * (__SBREF(__a_sb, 2))); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
/* Center row: the middle tap carries the dominant weight (0.24712f). */
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = (0.03135f * (__SBREF(__a_sb, -2))) + (0.03136f * (__SBREF(__a_sb, -1))) + (0.24712f * (__REGREF(__a, 0))) + (0.03138f * (__SBREF(__a_sb, 1))) + (0.03139f * (__SBREF(__a_sb, 2))); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (0.03140f * (__SBREF(__a_sb, -2))) + (0.03141f * (__SBREF(__a_sb, -1))) + (0.03142f * (__REGREF(__a, 0))) + (0.03143f * (__SBREF(__a_sb, 1))) + (0.03144f * (__SBREF(__a_sb, 2))); } while (0)
#define __CALCEXPR_3(out, a) do { float etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = (0.03145f * (__SBREF(__a_sb, -2))) + (0.03146f * (__SBREF(__a_sb, -1))) + (0.03147f * (__REGREF(__a, 0))) + (0.03148f * (__SBREF(__a_sb, 1))) + (0.03149f * (__SBREF(__a_sb, 2))); } while (0)
#define __CALCEXPR_4(out, a) do { float etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
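/* __CALCk runs the full 5x5 update only where __writeValidk holds;
 * elsewhere the input value is passed through unchanged (out2 = reg),
 * which keeps out-of-range lanes consistent for later stages. */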
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC4(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC5(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC6(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC7(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid7) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC8(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid8) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC9(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid9) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
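/* First block along c1 (__c1Id == 0): rows 0 and 1 form the physical
 * boundary and are fed to all nine stages unchanged; the pipeline then
 * fills row by row until the first result lands at row 2, after row 20
 * has been loaded (an 18-row latency for nine fused steps). */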
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_0);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_0);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_0);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_0);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_0);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_0);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_0);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_0);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_0);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_0);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(2, __reg_9_2);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(3, __reg_9_3);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(4, __reg_9_4);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(5, __reg_9_0);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(6, __reg_9_1);
__LOAD(__reg_0, 25);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(7, __reg_9_2);
__LOAD(__reg_0, 26);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(8, __reg_9_3);
__LOAD(__reg_0, 27);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(9, __reg_9_4);
__LOAD(__reg_0, 28);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(10, __reg_9_0);
__LOAD(__reg_0, 29);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(11, __reg_9_1);
__LOAD(__reg_0, 30);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(12, __reg_9_2);
__LOAD(__reg_0, 31);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(13, __reg_9_3);
__LOAD(__reg_0, 32);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(14, __reg_9_4);
__LOAD(__reg_0, 33);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(15, __reg_9_0);
__LOAD(__reg_0, 34);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(16, __reg_9_1);
__LOAD(__reg_0, 35);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(17, __reg_9_2);
__LOAD(__reg_0, 36);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(18, __reg_9_3);
}
else
{
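/* Interior blocks: warm up from the overlapped halo rows instead of a
 * physical boundary; each stage joins the pipeline 2 * __halo1 = 4 rows
 * after the previous one, and nothing is stored until every stage holds
 * valid data. */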
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__LOAD(__reg_0, 25);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__LOAD(__reg_0, 26);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__LOAD(__reg_0, 27);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__LOAD(__reg_0, 28);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__LOAD(__reg_0, 29);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__LOAD(__reg_0, 30);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__LOAD(__reg_0, 31);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__LOAD(__reg_0, 32);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__LOAD(__reg_0, 33);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__LOAD(__reg_0, 34);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__LOAD(__reg_0, 35);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__LOAD(__reg_0, 36);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(18, __reg_9_3);
}
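// Advance the shared-memory window pointer to the second half of the double
// buffer before entering the steady-state loop.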
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
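// Last tile along the streamed dimension: each loop iteration consumes five
// input rows (one per phase of the 5-way register rotation) and stores every
// result 18 rows behind its load.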
for (__h = 37; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 18, __reg_9_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 18, __reg_9_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 18, __reg_9_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 18, __reg_9_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 18, __reg_9_3);
__h++;
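// __DB_SWITCH presumably flips the shared-memory double buffer; the barrier
// keeps the next 5-row batch from racing with reads of the previous one.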
__DB_SWITCH(); __syncthreads();
}
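// Drain the pipeline. The empty `if (0) {}` head lets the generator emit a
// uniform `else if` chain keyed on how many input rows remain (2 through 6).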
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
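// Two input rows remain: load __h+0 and __h+1, then flush all nine stages.
// The repeated register arguments in the clamped __CALCk forms appear to pin
// the stencil window at the tile's upper edge.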
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 18, __reg_9_4);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 17, __reg_9_0);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 16, __reg_9_1);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 15, __reg_9_2);
__reg_2_3 = __reg_1_3;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 14, __reg_9_3);
__reg_3_2 = __reg_2_2;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 13, __reg_9_4);
__reg_3_3 = __reg_2_3;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 12, __reg_9_0);
__reg_4_2 = __reg_3_2;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 11, __reg_9_1);
__reg_4_3 = __reg_3_3;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 10, __reg_9_2);
__reg_5_2 = __reg_4_2;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_0, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 9, __reg_9_3);
__reg_5_3 = __reg_4_3;
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 8, __reg_9_4);
__reg_6_2 = __reg_5_2;
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_0, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 7, __reg_9_0);
__reg_6_3 = __reg_5_3;
__CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 6, __reg_9_1);
__reg_7_2 = __reg_6_2;
__CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_0, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 5, __reg_9_2);
__reg_7_3 = __reg_6_3;
__CALC8(__reg_8_4, __reg_8_4, __reg_8_4, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 4, __reg_9_3);
__reg_8_2 = __reg_7_2;
__CALC8(__reg_8_0, __reg_8_0, __reg_8_0, __reg_8_0, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 3, __reg_9_4);
__reg_8_3 = __reg_7_3;
__CALC9(__reg_9_4, __reg_9_4, __reg_9_4, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 2, __reg_9_0);
__CALC9(__reg_9_0, __reg_9_0, __reg_9_0, __reg_9_0, __reg_9_1, __reg_8_3);
__STORE(__h - 1, __reg_9_1);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
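// Three input rows remain: load __h+0 .. __h+2, then drain as above.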
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 18, __reg_9_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 17, __reg_9_0);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 16, __reg_9_1);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 15, __reg_9_2);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 14, __reg_9_3);
__reg_2_4 = __reg_1_4;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 13, __reg_9_4);
__reg_3_3 = __reg_2_3;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 12, __reg_9_0);
__reg_3_4 = __reg_2_4;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 11, __reg_9_1);
__reg_4_3 = __reg_3_3;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 10, __reg_9_2);
__reg_4_4 = __reg_3_4;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 9, __reg_9_3);
__reg_5_3 = __reg_4_3;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_1, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 8, __reg_9_4);
__reg_5_4 = __reg_4_4;
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 7, __reg_9_0);
__reg_6_3 = __reg_5_3;
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_1, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 6, __reg_9_1);
__reg_6_4 = __reg_5_4;
__CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 5, __reg_9_2);
__reg_7_3 = __reg_6_3;
__CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_1, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 4, __reg_9_3);
__reg_7_4 = __reg_6_4;
__CALC8(__reg_8_0, __reg_8_0, __reg_8_0, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 3, __reg_9_4);
__reg_8_3 = __reg_7_3;
__CALC8(__reg_8_1, __reg_8_1, __reg_8_1, __reg_8_1, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 2, __reg_9_0);
__reg_8_4 = __reg_7_4;
__CALC9(__reg_9_0, __reg_9_0, __reg_9_0, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 1, __reg_9_1);
__CALC9(__reg_9_1, __reg_9_1, __reg_9_1, __reg_9_1, __reg_9_2, __reg_8_4);
__STORE(__h + 0, __reg_9_2);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
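// Four input rows remain: load __h+0 .. __h+3, then drain as above.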
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 18, __reg_9_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 17, __reg_9_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 16, __reg_9_1);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 15, __reg_9_2);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 14, __reg_9_3);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 13, __reg_9_4);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 12, __reg_9_0);
__reg_3_4 = __reg_2_4;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 11, __reg_9_1);
__reg_3_0 = __reg_2_0;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 10, __reg_9_2);
__reg_4_4 = __reg_3_4;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 9, __reg_9_3);
__reg_4_0 = __reg_3_0;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 8, __reg_9_4);
__reg_5_4 = __reg_4_4;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_2, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 7, __reg_9_0);
__reg_5_0 = __reg_4_0;
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 6, __reg_9_1);
__reg_6_4 = __reg_5_4;
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_2, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 5, __reg_9_2);
__reg_6_0 = __reg_5_0;
__CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 4, __reg_9_3);
__reg_7_4 = __reg_6_4;
__CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_2, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 3, __reg_9_4);
__reg_7_0 = __reg_6_0;
__CALC8(__reg_8_1, __reg_8_1, __reg_8_1, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 2, __reg_9_0);
__reg_8_4 = __reg_7_4;
__CALC8(__reg_8_2, __reg_8_2, __reg_8_2, __reg_8_2, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 1, __reg_9_1);
__reg_8_0 = __reg_7_0;
__CALC9(__reg_9_1, __reg_9_1, __reg_9_1, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h + 0, __reg_9_2);
__CALC9(__reg_9_2, __reg_9_2, __reg_9_2, __reg_9_2, __reg_9_3, __reg_8_0);
__STORE(__h + 1, __reg_9_3);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
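// Five input rows remain: load __h+0 .. __h+4, then drain as above.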
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 18, __reg_9_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 17, __reg_9_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 16, __reg_9_1);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 15, __reg_9_2);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 14, __reg_9_3);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 13, __reg_9_4);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 12, __reg_9_0);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 11, __reg_9_1);
__reg_3_0 = __reg_2_0;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 10, __reg_9_2);
__reg_3_1 = __reg_2_1;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 9, __reg_9_3);
__reg_4_0 = __reg_3_0;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 8, __reg_9_4);
__reg_4_1 = __reg_3_1;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 7, __reg_9_0);
__reg_5_0 = __reg_4_0;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_3, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 6, __reg_9_1);
__reg_5_1 = __reg_4_1;
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 5, __reg_9_2);
__reg_6_0 = __reg_5_0;
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_3, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 4, __reg_9_3);
__reg_6_1 = __reg_5_1;
__CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 3, __reg_9_4);
__reg_7_0 = __reg_6_0;
__CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_3, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 2, __reg_9_0);
__reg_7_1 = __reg_6_1;
__CALC8(__reg_8_2, __reg_8_2, __reg_8_2, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 1, __reg_9_1);
__reg_8_0 = __reg_7_0;
__CALC8(__reg_8_3, __reg_8_3, __reg_8_3, __reg_8_3, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h + 0, __reg_9_2);
__reg_8_1 = __reg_7_1;
__CALC9(__reg_9_2, __reg_9_2, __reg_9_2, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h + 1, __reg_9_3);
__CALC9(__reg_9_3, __reg_9_3, __reg_9_3, __reg_9_3, __reg_9_4, __reg_8_1);
__STORE(__h + 2, __reg_9_4);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
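// Six input rows remain: load __h+0 .. __h+5, then drain as above.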
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 18, __reg_9_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 17, __reg_9_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 16, __reg_9_1);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 15, __reg_9_2);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 14, __reg_9_3);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 13, __reg_9_4);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 12, __reg_9_0);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 11, __reg_9_1);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 10, __reg_9_2);
__reg_3_1 = __reg_2_1;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 9, __reg_9_3);
__reg_3_2 = __reg_2_2;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 8, __reg_9_4);
__reg_4_1 = __reg_3_1;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 7, __reg_9_0);
__reg_4_2 = __reg_3_2;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 6, __reg_9_1);
__reg_5_1 = __reg_4_1;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_4, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 5, __reg_9_2);
__reg_5_2 = __reg_4_2;
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 4, __reg_9_3);
__reg_6_1 = __reg_5_1;
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_4, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 3, __reg_9_4);
__reg_6_2 = __reg_5_2;
__CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 2, __reg_9_0);
__reg_7_1 = __reg_6_1;
__CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_4, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 1, __reg_9_1);
__reg_7_2 = __reg_6_2;
__CALC8(__reg_8_3, __reg_8_3, __reg_8_3, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h + 0, __reg_9_2);
__reg_8_1 = __reg_7_1;
__CALC8(__reg_8_4, __reg_8_4, __reg_8_4, __reg_8_4, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h + 1, __reg_9_3);
__reg_8_2 = __reg_7_2;
__CALC9(__reg_9_3, __reg_9_3, __reg_9_3, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h + 2, __reg_9_4);
__CALC9(__reg_9_4, __reg_9_4, __reg_9_4, __reg_9_4, __reg_9_0, __reg_8_2);
__STORE(__h + 3, __reg_9_0);
}
}
else
{
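/* Steady state for blocks not touching the bottom boundary: each loop
   iteration loads 5 rows, advances all 9 pipeline stages per row, and
   stores 5 fully updated rows 18 positions (9 stages x halo 2) behind
   the load front. */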
for (__h = 37; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 18, __reg_9_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 18, __reg_9_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 18, __reg_9_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 18, __reg_9_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 18, __reg_9_3);
__h++;
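/* One extra buffer flip plus a barrier at the end of the batch keeps the
   shared-memory double-buffer parity aligned across loop iterations. */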
__DB_SWITCH(); __syncthreads();
}
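/* Drain: handle the remaining (fewer than 5) rows one at a time,
   returning as soon as the overlapped extent __side1LenOl is exhausted. */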
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 18, __reg_9_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 18, __reg_9_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 18, __reg_9_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 18, __reg_9_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 18, __reg_9_3);
__h++;
}
}
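/* kernel0_8: variant of the same 2D order-2 star stencil with 8 fused
   timesteps per launch (__side0Len = 8). The structure mirrors the kernels
   above: an 8-stage register pipeline per thread, a double-buffered
   shared-memory row tile, and prologue / steady-state / epilogue phases. */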
__global__ void kernel0_8(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 480;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
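/* Per-thread register pipeline: stage k (k = 1..8) keeps a rotating window
   of 5 rows (__reg_k_0 .. __reg_k_4), enough to cover the radius-2 stencil
   in the row direction; __reg_0 holds the freshly loaded input row. */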
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_2_3;
float __reg_2_4;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_3_3;
float __reg_3_4;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_4_3;
float __reg_4_4;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_5_3;
float __reg_5_4;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
float __reg_6_3;
float __reg_6_4;
float __reg_7_0;
float __reg_7_1;
float __reg_7_2;
float __reg_7_3;
float __reg_7_4;
float __reg_8_0;
float __reg_8_1;
float __reg_8_2;
float __reg_8_3;
float __reg_8_4;
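/* Double-buffered shared-memory row tile: one block-wide row per half;
   __DB_SWITCH() flips halves so a stage can read its column neighbors from
   the previously staged row while the next row is being staged. */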
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
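/* A stage-k result is valid only for threads at least k halo widths from
   the tile edge, so each fused timestep shrinks the writable region by
   __halo2 columns on each side; only stage-8 output may be stored. */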
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __storeValid = __writeValid8;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = ((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1))) + (0.03127f * (__REGREF(__a, 0))) + (0.03128f * (__SBREF(__a_sb, 1))) + (0.03129f * (__SBREF(__a_sb, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = ((0.03130f * (__SBREF(__a_sb, -2))) + (0.03131f * (__SBREF(__a_sb, -1))) + (0.03132f * (__REGREF(__a, 0))) + (0.03133f * (__SBREF(__a_sb, 1))) + (0.03134f * (__SBREF(__a_sb, 2)))); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((0.03135f * (__SBREF(__a_sb, -2))) + (0.03136f * (__SBREF(__a_sb, -1))) + (0.24712f * (__REGREF(__a, 0))) + (0.03138f * (__SBREF(__a_sb, 1))) + (0.03139f * (__SBREF(__a_sb, 2)))); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = ((0.03140f * (__SBREF(__a_sb, -2))) + (0.03141f * (__SBREF(__a_sb, -1))) + (0.03142f * (__REGREF(__a, 0))) + (0.03143f * (__SBREF(__a_sb, 1))) + (0.03144f * (__SBREF(__a_sb, 2)))); } while (0)
#define __CALCEXPR_3(out, a) do { float etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((0.03145f * (__SBREF(__a_sb, -2))) + (0.03146f * (__SBREF(__a_sb, -1))) + (0.03147f * (__REGREF(__a, 0))) + (0.03148f * (__SBREF(__a_sb, 1))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __CALCEXPR_4(out, a) do { float etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
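/* The 5x5 stencil is split into 5 row contributions: __CALCEXPR_0 starts a
   new partial sum from one extreme row and __CALCEXPR_1..4 accumulate the
   remaining rows into outputs sitting at different pipeline depths. */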
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC4(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC5(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC6(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC7(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid7) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC8(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid8) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
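/* Pipeline step: __LOAD stages row h, __CALCk advances stage k (passing
   the input through unchanged where __writeValidk is false), and __STORE
   writes a row once it has passed through all 8 stages. */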
if (__c1Id == 0)
{
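/* Prologue for blocks containing the top boundary: each stage is primed
   with the clamped boundary row (__reg_0 repeated) before real rows are
   fed in, and stores begin as soon as stage 8 produces output. */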
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_0);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_0);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_0);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_0);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_0);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_0);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_0);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_0);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(2, __reg_8_2);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(3, __reg_8_3);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(4, __reg_8_4);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(5, __reg_8_0);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(6, __reg_8_1);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(7, __reg_8_2);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(8, __reg_8_3);
__LOAD(__reg_0, 25);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(9, __reg_8_4);
__LOAD(__reg_0, 26);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(10, __reg_8_0);
__LOAD(__reg_0, 27);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(11, __reg_8_1);
__LOAD(__reg_0, 28);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(12, __reg_8_2);
__LOAD(__reg_0, 29);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(13, __reg_8_3);
__LOAD(__reg_0, 30);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(14, __reg_8_4);
__LOAD(__reg_0, 31);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(15, __reg_8_0);
__LOAD(__reg_0, 32);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(16, __reg_8_1);
}
else
{
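/* Prologue for interior blocks: fill the pipeline row by row with no
   boundary clamping; the first fully updated row is stored only once all
   8 stages are primed (row 32 yields __STORE(16, ...)). */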
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__LOAD(__reg_0, 25);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__LOAD(__reg_0, 26);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__LOAD(__reg_0, 27);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__LOAD(__reg_0, 28);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__LOAD(__reg_0, 29);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__LOAD(__reg_0, 30);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__LOAD(__reg_0, 31);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__LOAD(__reg_0, 32);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(16, __reg_8_1);
}
__a_sb = __a_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
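/* Epilogue for blocks containing the bottom boundary: the loop below
   advances 5 rows per iteration and stops 7 rows short of the block end;
   the remainder branches that follow drain the last few rows. */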
for (__h = 33; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 16, __reg_8_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 16, __reg_8_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 16, __reg_8_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 16, __reg_8_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 16, __reg_8_1);
__h++;
}
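/* Remainder dispatch: exactly one branch below fires, chosen by how many
   rows remain; the vacuous if (0) only anchors the generated else-if
   chain. Each branch loads the final rows and then flushes the pipeline
   with clamped boundary values. */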
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 16, __reg_8_2);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 15, __reg_8_3);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 14, __reg_8_4);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 13, __reg_8_0);
__reg_2_4 = __reg_1_4;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 12, __reg_8_1);
__reg_3_3 = __reg_2_3;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 11, __reg_8_2);
__reg_3_4 = __reg_2_4;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 10, __reg_8_3);
__reg_4_3 = __reg_3_3;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 9, __reg_8_4);
__reg_4_4 = __reg_3_4;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 8, __reg_8_0);
__reg_5_3 = __reg_4_3;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_1, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 7, __reg_8_1);
__reg_5_4 = __reg_4_4;
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 6, __reg_8_2);
__reg_6_3 = __reg_5_3;
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_1, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 5, __reg_8_3);
__reg_6_4 = __reg_5_4;
__CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 4, __reg_8_4);
__reg_7_3 = __reg_6_3;
__CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_1, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 3, __reg_8_0);
__reg_7_4 = __reg_6_4;
__CALC8(__reg_8_0, __reg_8_0, __reg_8_0, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 2, __reg_8_1);
__CALC8(__reg_8_1, __reg_8_1, __reg_8_1, __reg_8_1, __reg_8_2, __reg_7_4);
__STORE(__h - 1, __reg_8_2);
}
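/* The else-if ladder below appears to drain the register pipeline when this
   block's last input row falls 3..6 rows past __h: each branch loads the few
   remaining rows, then flushes all eight pipeline stages through the
   edge-clamped __CALC variants before issuing the final __STOREs. */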
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 16, __reg_8_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 15, __reg_8_3);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 14, __reg_8_4);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 13, __reg_8_0);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 12, __reg_8_1);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 11, __reg_8_2);
__reg_3_4 = __reg_2_4;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 10, __reg_8_3);
__reg_3_0 = __reg_2_0;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 9, __reg_8_4);
__reg_4_4 = __reg_3_4;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 8, __reg_8_0);
__reg_4_0 = __reg_3_0;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 7, __reg_8_1);
__reg_5_4 = __reg_4_4;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_2, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 6, __reg_8_2);
__reg_5_0 = __reg_4_0;
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 5, __reg_8_3);
__reg_6_4 = __reg_5_4;
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_2, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 4, __reg_8_4);
__reg_6_0 = __reg_5_0;
__CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 3, __reg_8_0);
__reg_7_4 = __reg_6_4;
__CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_2, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 2, __reg_8_1);
__reg_7_0 = __reg_6_0;
__CALC8(__reg_8_1, __reg_8_1, __reg_8_1, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 1, __reg_8_2);
__CALC8(__reg_8_2, __reg_8_2, __reg_8_2, __reg_8_2, __reg_8_3, __reg_7_0);
__STORE(__h + 0, __reg_8_3);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 16, __reg_8_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 15, __reg_8_3);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 14, __reg_8_4);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 13, __reg_8_0);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 12, __reg_8_1);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 11, __reg_8_2);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 10, __reg_8_3);
__reg_3_0 = __reg_2_0;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 9, __reg_8_4);
__reg_3_1 = __reg_2_1;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 8, __reg_8_0);
__reg_4_0 = __reg_3_0;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 7, __reg_8_1);
__reg_4_1 = __reg_3_1;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 6, __reg_8_2);
__reg_5_0 = __reg_4_0;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_3, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 5, __reg_8_3);
__reg_5_1 = __reg_4_1;
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 4, __reg_8_4);
__reg_6_0 = __reg_5_0;
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_3, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 3, __reg_8_0);
__reg_6_1 = __reg_5_1;
__CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 2, __reg_8_1);
__reg_7_0 = __reg_6_0;
__CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_3, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 1, __reg_8_2);
__reg_7_1 = __reg_6_1;
__CALC8(__reg_8_2, __reg_8_2, __reg_8_2, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h + 0, __reg_8_3);
__CALC8(__reg_8_3, __reg_8_3, __reg_8_3, __reg_8_3, __reg_8_4, __reg_7_1);
__STORE(__h + 1, __reg_8_4);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 16, __reg_8_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 15, __reg_8_3);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 14, __reg_8_4);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 13, __reg_8_0);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 12, __reg_8_1);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 11, __reg_8_2);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 10, __reg_8_3);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 9, __reg_8_4);
__reg_3_1 = __reg_2_1;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 8, __reg_8_0);
__reg_3_2 = __reg_2_2;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 7, __reg_8_1);
__reg_4_1 = __reg_3_1;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 6, __reg_8_2);
__reg_4_2 = __reg_3_2;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 5, __reg_8_3);
__reg_5_1 = __reg_4_1;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_4, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 4, __reg_8_4);
__reg_5_2 = __reg_4_2;
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 3, __reg_8_0);
__reg_6_1 = __reg_5_1;
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_4, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 2, __reg_8_1);
__reg_6_2 = __reg_5_2;
__CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 1, __reg_8_2);
__reg_7_1 = __reg_6_1;
__CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_4, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h + 0, __reg_8_3);
__reg_7_2 = __reg_6_2;
__CALC8(__reg_8_3, __reg_8_3, __reg_8_3, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h + 1, __reg_8_4);
__CALC8(__reg_8_4, __reg_8_4, __reg_8_4, __reg_8_4, __reg_8_0, __reg_7_2);
__STORE(__h + 2, __reg_8_0);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 16, __reg_8_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 15, __reg_8_3);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 14, __reg_8_4);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 13, __reg_8_0);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 12, __reg_8_1);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 11, __reg_8_2);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 10, __reg_8_3);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 9, __reg_8_4);
__reg_2_3 = __reg_1_3;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 8, __reg_8_0);
__reg_3_2 = __reg_2_2;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 7, __reg_8_1);
__reg_3_3 = __reg_2_3;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 6, __reg_8_2);
__reg_4_2 = __reg_3_2;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 5, __reg_8_3);
__reg_4_3 = __reg_3_3;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 4, __reg_8_4);
__reg_5_2 = __reg_4_2;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_0, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 3, __reg_8_0);
__reg_5_3 = __reg_4_3;
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 2, __reg_8_1);
__reg_6_2 = __reg_5_2;
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_0, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 1, __reg_8_2);
__reg_6_3 = __reg_5_3;
__CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h + 0, __reg_8_3);
__reg_7_2 = __reg_6_2;
__CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_0, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h + 1, __reg_8_4);
__reg_7_3 = __reg_6_3;
__CALC8(__reg_8_4, __reg_8_4, __reg_8_4, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h + 2, __reg_8_0);
__CALC8(__reg_8_0, __reg_8_0, __reg_8_0, __reg_8_0, __reg_8_1, __reg_7_3);
__STORE(__h + 3, __reg_8_1);
}
}
else
{
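/* Steady-state path for interior blocks: the loop body is unrolled five
   times, one iteration per slot of the five-register rotation, so each pass
   consumes five input rows and emits five fully-updated (stage-8) output
   rows at offset __h - 16. */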
for (__h = 33; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 16, __reg_8_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 16, __reg_8_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 16, __reg_8_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 16, __reg_8_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 16, __reg_8_1);
__h++;
}
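/* Tail: flush the remaining 0..4 rows one at a time, returning as soon as
   __h reaches the overlapped extent __side1LenOl. */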
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 16, __reg_8_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 16, __reg_8_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 16, __reg_8_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 16, __reg_8_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 16, __reg_8_1);
__h++;
}
}
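/* kernel0_7: variant of the same radius-2 star stencil with seven time steps
   fused per launch (__side0Len = 7). Each thread block covers a 256 x 484
   tile of the c1 x c2 plane plus a halo of __halo * __side0Len = 14 points
   per side; the seven __CALCn stages below form the in-register time
   pipeline. */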
__global__ void kernel0_7(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 484;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
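  /* Block-to-tile mapping: blockIdx.x decomposes into a tile index along c1
     (__c1Id) and one along c2; __c2 is this thread's global column, shifted
     back by __OlLen2 so the overlap/halo region is covered as well. */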
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_2_3;
float __reg_2_4;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_3_3;
float __reg_3_4;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_4_3;
float __reg_4_4;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_5_3;
float __reg_5_4;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
float __reg_6_3;
float __reg_6_4;
float __reg_7_0;
float __reg_7_1;
float __reg_7_2;
float __reg_7_3;
float __reg_7_4;
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
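  /* Double-buffered shared-memory row: __DB_SWITCH ping-pongs __a_sb between
     the two halves of __a_sb_double, so the single __syncthreads inside
     __CALCSETUP suffices between producing and consuming a row. */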
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __storeValid = __writeValid7;
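  /* Each fused time step shrinks the valid write region by one halo width
     (2 columns); only values that survive all seven steps (__writeValid7)
     are written back to global memory. */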
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
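/* A holds two time planes: __LOAD reads plane (c0 % 2) and __DEST writes
   plane ((c0 + 1) % 2), ping-ponging between kernel launches. */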
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { float etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { float etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
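/* The 2D stencil is split by output row: each __CALCEXPR_k adds the current
   input row's five-point c2 contribution (resolved through shared memory via
   __SBREF) to one of the five in-flight output accumulators, so a single
   input row updates every partial sum it participates in at once. */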
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC4(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC5(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC6(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC7(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid7) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
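  /* __c1Id == 0: this block owns the top boundary. Rows 0 and 1 seed every
     pipeline stage directly (presumably the clamped edge handling), then
     rows 2..16 prime the stages one by one before the first store. */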
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_0);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_0);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_0);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_0);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_0);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_0);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(2, __reg_7_2);
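  /* Pipeline primed: from here on, each additional input row yields exactly
     one fully-updated (stage-7) output row. */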
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(3, __reg_7_3);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(4, __reg_7_4);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(5, __reg_7_0);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(6, __reg_7_1);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(7, __reg_7_2);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(8, __reg_7_3);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(9, __reg_7_4);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(10, __reg_7_0);
__LOAD(__reg_0, 25);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(11, __reg_7_1);
__LOAD(__reg_0, 26);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(12, __reg_7_2);
__LOAD(__reg_0, 27);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(13, __reg_7_3);
__LOAD(__reg_0, 28);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(14, __reg_7_4);
}
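/* Interior-tile prologue: prime the seven-stage (CALC1..CALC7) pipeline over
   input rows 0..28 without the clamped register inputs used above; only
   row 14 — the first output row with a full stencil history — is stored,
   the overlapped rows being produced by the neighbouring tile. */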
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__LOAD(__reg_0, 25);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__LOAD(__reg_0, 26);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__LOAD(__reg_0, 27);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__LOAD(__reg_0, 28);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(14, __reg_7_4);
}
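/* The pipeline is now primed: 29 input rows (0..28) are in flight across the
   seven CALC stages and the first valid output rows have been stored (note
   the store latency of 14 = 7 stages * halo 2). Point __a_sb at the second
   half of the double buffer before entering the streaming loop. */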
__a_sb = __a_sb_double + __blockSize * 1;
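/* Only the last tile along c1 reaches the global bottom boundary: it runs
   the steady-state loop while at least a full 5-row group remains in its
   extent, then drains through one of the specialized tails below. */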
if (__c1Id == __side1Num - 1)
{
for (__h = 29; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 14, __reg_7_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 14, __reg_7_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 14, __reg_7_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 14, __reg_7_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 14, __reg_7_4);
__h++;
__DB_SWITCH(); __syncthreads();
}
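/* Tail drain: pick the case matching the number of rows left in this tile
   (__h + 2 .. __h + 6 == tile extent) and flush the remaining pipeline
   stages, clamping out-of-range stencil inputs by repeating edge registers. */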
if (0) {} // dead head branch; keeps the generated else-if chain uniform
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 14, __reg_7_0);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 13, __reg_7_1);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 12, __reg_7_2);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 11, __reg_7_3);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 10, __reg_7_4);
__reg_3_4 = __reg_2_4;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 9, __reg_7_0);
__reg_3_0 = __reg_2_0;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 8, __reg_7_1);
__reg_4_4 = __reg_3_4;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 7, __reg_7_2);
__reg_4_0 = __reg_3_0;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 6, __reg_7_3);
__reg_5_4 = __reg_4_4;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_2, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 5, __reg_7_4);
__reg_5_0 = __reg_4_0;
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 4, __reg_7_0);
__reg_6_4 = __reg_5_4;
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_2, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 3, __reg_7_1);
__reg_6_0 = __reg_5_0;
__CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 2, __reg_7_2);
__CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_2, __reg_7_3, __reg_6_0);
__STORE(__h - 1, __reg_7_3);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 14, __reg_7_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 13, __reg_7_1);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 12, __reg_7_2);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 11, __reg_7_3);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 10, __reg_7_4);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 9, __reg_7_0);
__reg_3_0 = __reg_2_0;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 8, __reg_7_1);
__reg_3_1 = __reg_2_1;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 7, __reg_7_2);
__reg_4_0 = __reg_3_0;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 6, __reg_7_3);
__reg_4_1 = __reg_3_1;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 5, __reg_7_4);
__reg_5_0 = __reg_4_0;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_3, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 4, __reg_7_0);
__reg_5_1 = __reg_4_1;
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 3, __reg_7_1);
__reg_6_0 = __reg_5_0;
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_3, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 2, __reg_7_2);
__reg_6_1 = __reg_5_1;
__CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 1, __reg_7_3);
__CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_3, __reg_7_4, __reg_6_1);
__STORE(__h + 0, __reg_7_4);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 14, __reg_7_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 13, __reg_7_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 12, __reg_7_2);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 11, __reg_7_3);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 10, __reg_7_4);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 9, __reg_7_0);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 8, __reg_7_1);
__reg_3_1 = __reg_2_1;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 7, __reg_7_2);
__reg_3_2 = __reg_2_2;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 6, __reg_7_3);
__reg_4_1 = __reg_3_1;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 5, __reg_7_4);
__reg_4_2 = __reg_3_2;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 4, __reg_7_0);
__reg_5_1 = __reg_4_1;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_4, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 3, __reg_7_1);
__reg_5_2 = __reg_4_2;
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 2, __reg_7_2);
__reg_6_1 = __reg_5_1;
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_4, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 1, __reg_7_3);
__reg_6_2 = __reg_5_2;
__CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h + 0, __reg_7_4);
__CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_4, __reg_7_0, __reg_6_2);
__STORE(__h + 1, __reg_7_0);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 14, __reg_7_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 13, __reg_7_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 12, __reg_7_2);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 11, __reg_7_3);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 10, __reg_7_4);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 9, __reg_7_0);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 8, __reg_7_1);
__reg_2_3 = __reg_1_3;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 7, __reg_7_2);
__reg_3_2 = __reg_2_2;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 6, __reg_7_3);
__reg_3_3 = __reg_2_3;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 5, __reg_7_4);
__reg_4_2 = __reg_3_2;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 4, __reg_7_0);
__reg_4_3 = __reg_3_3;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 3, __reg_7_1);
__reg_5_2 = __reg_4_2;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_0, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 2, __reg_7_2);
__reg_5_3 = __reg_4_3;
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 1, __reg_7_3);
__reg_6_2 = __reg_5_2;
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_0, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h + 0, __reg_7_4);
__reg_6_3 = __reg_5_3;
__CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h + 1, __reg_7_0);
__CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_0, __reg_7_1, __reg_6_3);
__STORE(__h + 2, __reg_7_1);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 14, __reg_7_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 13, __reg_7_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 12, __reg_7_2);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 11, __reg_7_3);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 10, __reg_7_4);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 9, __reg_7_0);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 8, __reg_7_1);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 7, __reg_7_2);
__reg_2_4 = __reg_1_4;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 6, __reg_7_3);
__reg_3_3 = __reg_2_3;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 5, __reg_7_4);
__reg_3_4 = __reg_2_4;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 4, __reg_7_0);
__reg_4_3 = __reg_3_3;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 3, __reg_7_1);
__reg_4_4 = __reg_3_4;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 2, __reg_7_2);
__reg_5_3 = __reg_4_3;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_1, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 1, __reg_7_3);
__reg_5_4 = __reg_4_4;
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h + 0, __reg_7_4);
__reg_6_3 = __reg_5_3;
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_1, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h + 1, __reg_7_0);
__reg_6_4 = __reg_5_4;
__CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h + 2, __reg_7_1);
__CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_1, __reg_7_2, __reg_6_4);
__STORE(__h + 3, __reg_7_2);
}
}
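/* Interior tiles: no bottom boundary here, so keep streaming full 5-row
   groups, then finish row by row, returning as soon as the overlapped
   extent __side1LenOl is consumed. */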
else
{
for (__h = 29; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 14, __reg_7_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 14, __reg_7_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 14, __reg_7_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 14, __reg_7_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 14, __reg_7_4);
__h++;
__DB_SWITCH(); __syncthreads();
}
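/* Row-by-row epilogue: at most four rows remain past the unrolled loop. */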
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 14, __reg_7_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 14, __reg_7_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 14, __reg_7_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 14, __reg_7_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 14, __reg_7_4);
__h++;
}
}
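/* kernel0_6: the degree-6 variant of the same radius-2 stencil sweep — six
   time steps are fused per launch (__side0Len = 6), so every write predicate
   below gives up __halo2 = 2 columns per fused step.

   A minimal host-side launch sketch, assuming host variables d_A (two
   dimsize*dimsize time planes already resident on the device), dimsize and
   timestep; the tile sizes are read off the constants inside the kernel
   (__side1Len = 256, __side2Len = 488, __side2LenOl = 512):

     const unsigned c1Len = dimsize - 4, c2Len = dimsize - 4;
     const unsigned side1Num = (c1Len + 256 - 1) / 256;
     const unsigned side2Num = (c2Len + 488 - 1) / 488;
     dim3 grid(side1Num * side2Num, 1, 1);
     dim3 block(512, 1, 1);          // one thread per overlapped column
     for (int c0 = 0; c0 + 6 <= timestep; c0 += 6)
       kernel0_6<<<grid, block>>>(d_A, dimsize, timestep, c0);
     // Leftover (< 6) time steps would go to a lower-degree variant
     // (e.g. kernel0_1 .. kernel0_5), which this sketch assumes exist. */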
__global__ void kernel0_6(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
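  /* blockIdx.x linearizes the (c1, c2) tile grid; each thread owns one column
     __c2 of the overlapped tile, including halo columns that are loaded and
     computed but never stored. */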
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_2_3;
float __reg_2_4;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_3_3;
float __reg_3_4;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_4_3;
float __reg_4_4;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_5_3;
float __reg_5_4;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
float __reg_6_3;
float __reg_6_4;
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
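  /* Double-buffered shared-memory row: __DB_SWITCH() flips __a_sb between the
     two halves of __a_sb_double so a new input row can be staged while the
     previous one is still being read by neighbouring threads. */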
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __storeValid = __writeValid6;
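/* Validity predicates: results of fused step k are meaningful only for
   threads at least k*__halo2 columns from the tile edge, so each
   __writeValid shrinks the active window by one halo; only the final
   (6th) step is written back to global memory. */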
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { float etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { float etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC4(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC5(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC6(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
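/* Pipeline macros: each __CALCi stages one stencil time step, accumulating
   the five row contributions (__CALCEXPR_0..__CALCEXPR_4) into a rotating
   set of five output registers; __STORE commits a finished row of the last
   stage. The repeated identical #define blocks above are benign
   redefinitions emitted by the generator. The off-pattern center weight
   0.24712f makes the 25 coefficients sum to exactly 1. */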
if (__c1Id == 0)
{
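/* Top boundary tile (__c1Id == 0): prime all six stages, clamping at the
   c1 = 0 edge by feeding the freshly loaded row into every stage during
   the first two iterations. */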
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_0);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_0);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_0);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_0);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(2, __reg_6_2);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(3, __reg_6_3);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(4, __reg_6_4);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(5, __reg_6_0);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(6, __reg_6_1);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(7, __reg_6_2);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(8, __reg_6_3);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(9, __reg_6_4);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(10, __reg_6_0);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(11, __reg_6_1);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(12, __reg_6_2);
}
else
{
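/* Interior-tile prologue: warm the pipeline stage by stage across the 12
   overlap rows; the first row is committed only once all six stages are
   full (__STORE(12, ...)). */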
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(12, __reg_6_2);
}
__a_sb = __a_sb_double + __blockSize * 0;
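/* Reset the shared-memory pointer to the first buffer before the
   steady-state loop. */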
if (__c1Id == __side1Num - 1)
{
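/* Bottom boundary tile: run the unrolled steady-state loop, then drain the
   pipeline with one of the epilogue variants below, selected by how many
   rows remain in this tile. */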
for (__h = 25; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 12, __reg_6_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 12, __reg_6_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 12, __reg_6_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 12, __reg_6_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 12, __reg_6_2);
__h++;
}
if (0) {}
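/* "if (0) {}" appears to be a code-generation idiom that lets every real
   case be emitted uniformly as "else if"; each branch clamps at the bottom
   edge and flushes the in-flight rows of all six stages. */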
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 12, __reg_6_3);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 11, __reg_6_4);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 10, __reg_6_0);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 9, __reg_6_1);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 8, __reg_6_2);
__reg_3_0 = __reg_2_0;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 7, __reg_6_3);
__reg_3_1 = __reg_2_1;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 6, __reg_6_4);
__reg_4_0 = __reg_3_0;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 5, __reg_6_0);
__reg_4_1 = __reg_3_1;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 4, __reg_6_1);
__reg_5_0 = __reg_4_0;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_3, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 3, __reg_6_2);
__reg_5_1 = __reg_4_1;
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 2, __reg_6_3);
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_3, __reg_6_4, __reg_5_1);
__STORE(__h - 1, __reg_6_4);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 12, __reg_6_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 11, __reg_6_4);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 10, __reg_6_0);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 9, __reg_6_1);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 8, __reg_6_2);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 7, __reg_6_3);
__reg_3_1 = __reg_2_1;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 6, __reg_6_4);
__reg_3_2 = __reg_2_2;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 5, __reg_6_0);
__reg_4_1 = __reg_3_1;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 4, __reg_6_1);
__reg_4_2 = __reg_3_2;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 3, __reg_6_2);
__reg_5_1 = __reg_4_1;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_4, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 2, __reg_6_3);
__reg_5_2 = __reg_4_2;
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 1, __reg_6_4);
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_4, __reg_6_0, __reg_5_2);
__STORE(__h + 0, __reg_6_0);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 12, __reg_6_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 11, __reg_6_4);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 10, __reg_6_0);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 9, __reg_6_1);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 8, __reg_6_2);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 7, __reg_6_3);
__reg_2_3 = __reg_1_3;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 6, __reg_6_4);
__reg_3_2 = __reg_2_2;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 5, __reg_6_0);
__reg_3_3 = __reg_2_3;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 4, __reg_6_1);
__reg_4_2 = __reg_3_2;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 3, __reg_6_2);
__reg_4_3 = __reg_3_3;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 2, __reg_6_3);
__reg_5_2 = __reg_4_2;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_0, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 1, __reg_6_4);
__reg_5_3 = __reg_4_3;
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h + 0, __reg_6_0);
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_0, __reg_6_1, __reg_5_3);
__STORE(__h + 1, __reg_6_1);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 12, __reg_6_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 11, __reg_6_4);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 10, __reg_6_0);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 9, __reg_6_1);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 8, __reg_6_2);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 7, __reg_6_3);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 6, __reg_6_4);
__reg_2_4 = __reg_1_4;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 5, __reg_6_0);
__reg_3_3 = __reg_2_3;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 4, __reg_6_1);
__reg_3_4 = __reg_2_4;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 3, __reg_6_2);
__reg_4_3 = __reg_3_3;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 2, __reg_6_3);
__reg_4_4 = __reg_3_4;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 1, __reg_6_4);
__reg_5_3 = __reg_4_3;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_1, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h + 0, __reg_6_0);
__reg_5_4 = __reg_4_4;
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h + 1, __reg_6_1);
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_1, __reg_6_2, __reg_5_4);
__STORE(__h + 2, __reg_6_2);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 12, __reg_6_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 11, __reg_6_4);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 10, __reg_6_0);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 9, __reg_6_1);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 8, __reg_6_2);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 7, __reg_6_3);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 6, __reg_6_4);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 5, __reg_6_0);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 4, __reg_6_1);
__reg_3_4 = __reg_2_4;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 3, __reg_6_2);
__reg_3_0 = __reg_2_0;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 2, __reg_6_3);
__reg_4_4 = __reg_3_4;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 1, __reg_6_4);
__reg_4_0 = __reg_3_0;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h + 0, __reg_6_0);
__reg_5_4 = __reg_4_4;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_2, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h + 1, __reg_6_1);
__reg_5_0 = __reg_4_0;
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h + 2, __reg_6_2);
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_2, __reg_6_3, __reg_5_0);
__STORE(__h + 3, __reg_6_3);
}
}
else
{
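/* Interior-tile steady state: process five rows per iteration, one per
   register-rotation phase. */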
for (__h = 25; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 12, __reg_6_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 12, __reg_6_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 12, __reg_6_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 12, __reg_6_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 12, __reg_6_2);
__h++;
}
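/* Drain the remaining rows one at a time, returning once the overlapped
   tile extent __side1LenOl is exhausted. */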
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 12, __reg_6_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 12, __reg_6_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 12, __reg_6_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 12, __reg_6_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 12, __reg_6_2);
__h++;
}
}
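/* kernel0_5: same scheme with __side0Len = 5 fused time steps; the column
   tile widens to 492 so the block size stays 512 (492 + 2*2*5). */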
__global__ void kernel0_5(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_2_3;
float __reg_2_4;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_3_3;
float __reg_3_4;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_4_3;
float __reg_4_4;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_5_3;
float __reg_5_4;
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __storeValid = __writeValid5;
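    /* Every fused stage consumes a halo of __halo2 on each side along
     * dimension 2, so the window of threads allowed to write shrinks per
     * stage; only values valid after all five stages are stored. */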
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = ((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1))) + (0.03127f * (__REGREF(__a, 0))) + (0.03128f * (__SBREF(__a_sb, 1))) + (0.03129f * (__SBREF(__a_sb, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = ((0.03130f * (__SBREF(__a_sb, -2))) + (0.03131f * (__SBREF(__a_sb, -1))) + (0.03132f * (__REGREF(__a, 0))) + (0.03133f * (__SBREF(__a_sb, 1))) + (0.03134f * (__SBREF(__a_sb, 2)))); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((0.03135f * (__SBREF(__a_sb, -2))) + (0.03136f * (__SBREF(__a_sb, -1))) + (0.24712f * (__REGREF(__a, 0))) + (0.03138f * (__SBREF(__a_sb, 1))) + (0.03139f * (__SBREF(__a_sb, 2)))); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = ((0.03140f * (__SBREF(__a_sb, -2))) + (0.03141f * (__SBREF(__a_sb, -1))) + (0.03142f * (__REGREF(__a, 0))) + (0.03143f * (__SBREF(__a_sb, 1))) + (0.03144f * (__SBREF(__a_sb, 2)))); } while (0)
#define __CALCEXPR_3(out, a) do { float etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((0.03145f * (__SBREF(__a_sb, -2))) + (0.03146f * (__SBREF(__a_sb, -1))) + (0.03147f * (__REGREF(__a, 0))) + (0.03148f * (__SBREF(__a_sb, 1))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __CALCEXPR_4(out, a) do { float etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC4(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC5(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
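    /* Pipeline prologue. The first block along dimension 1 seeds all five
     * stages from rows 0 and 1 to clamp the top boundary and starts storing at
     * output row 2; interior blocks feed the stages in sequence and produce
     * their first store only at row 10. */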
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_0);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_0);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(2, __reg_5_2);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(3, __reg_5_3);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(4, __reg_5_4);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(5, __reg_5_0);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(6, __reg_5_1);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(7, __reg_5_2);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(8, __reg_5_3);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(9, __reg_5_4);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(10, __reg_5_0);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(10, __reg_5_0);
}
__a_sb = __a_sb_double + __blockSize * 1;
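    /* Steady state: the bottom boundary block takes the epilogue-aware path
     * below; all other blocks stream full 5-row batches per double-buffer
     * switch. */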
if (__c1Id == __side1Num - 1)
{
for (__h = 21; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 10, __reg_5_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 10, __reg_5_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 10, __reg_5_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 10, __reg_5_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 10, __reg_5_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
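    /* Pipeline epilogue for the bottom boundary tile: choose the drain
     * variant matching the number of remaining input rows; "if (0) {}"
     * merely anchors the generated else-if chain. */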
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 10, __reg_5_1);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 9, __reg_5_2);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 8, __reg_5_3);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 7, __reg_5_4);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 6, __reg_5_0);
__reg_3_1 = __reg_2_1;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 5, __reg_5_1);
__reg_3_2 = __reg_2_2;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 4, __reg_5_2);
__reg_4_1 = __reg_3_1;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 3, __reg_5_3);
__reg_4_2 = __reg_3_2;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 2, __reg_5_4);
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_4, __reg_5_0, __reg_4_2);
__STORE(__h - 1, __reg_5_0);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 10, __reg_5_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 9, __reg_5_2);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 8, __reg_5_3);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 7, __reg_5_4);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 6, __reg_5_0);
__reg_2_3 = __reg_1_3;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 5, __reg_5_1);
__reg_3_2 = __reg_2_2;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 4, __reg_5_2);
__reg_3_3 = __reg_2_3;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 3, __reg_5_3);
__reg_4_2 = __reg_3_2;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 2, __reg_5_4);
__reg_4_3 = __reg_3_3;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 1, __reg_5_0);
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_0, __reg_5_1, __reg_4_3);
__STORE(__h + 0, __reg_5_1);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 10, __reg_5_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 9, __reg_5_2);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 8, __reg_5_3);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 7, __reg_5_4);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 6, __reg_5_0);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 5, __reg_5_1);
__reg_2_4 = __reg_1_4;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 4, __reg_5_2);
__reg_3_3 = __reg_2_3;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 3, __reg_5_3);
__reg_3_4 = __reg_2_4;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 2, __reg_5_4);
__reg_4_3 = __reg_3_3;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 1, __reg_5_0);
__reg_4_4 = __reg_3_4;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h + 0, __reg_5_1);
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_1, __reg_5_2, __reg_4_4);
__STORE(__h + 1, __reg_5_2);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 10, __reg_5_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 9, __reg_5_2);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 8, __reg_5_3);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 7, __reg_5_4);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 6, __reg_5_0);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 5, __reg_5_1);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 4, __reg_5_2);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 3, __reg_5_3);
__reg_3_4 = __reg_2_4;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 2, __reg_5_4);
__reg_3_0 = __reg_2_0;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 1, __reg_5_0);
__reg_4_4 = __reg_3_4;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h + 0, __reg_5_1);
__reg_4_0 = __reg_3_0;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h + 1, __reg_5_2);
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_2, __reg_5_3, __reg_4_0);
__STORE(__h + 2, __reg_5_3);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 10, __reg_5_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 9, __reg_5_2);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 8, __reg_5_3);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 7, __reg_5_4);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 6, __reg_5_0);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 5, __reg_5_1);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 4, __reg_5_2);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 3, __reg_5_3);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 2, __reg_5_4);
__reg_3_0 = __reg_2_0;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 1, __reg_5_0);
__reg_3_1 = __reg_2_1;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h + 0, __reg_5_1);
__reg_4_0 = __reg_3_0;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h + 1, __reg_5_2);
__reg_4_1 = __reg_3_1;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h + 2, __reg_5_3);
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_3, __reg_5_4, __reg_4_1);
__STORE(__h + 3, __reg_5_4);
}
}
else
{
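    /* Interior blocks: stream 5-row batches until near the overlapped tile
     * height, then drain the leftover rows one at a time below. */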
for (__h = 21; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 10, __reg_5_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 10, __reg_5_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 10, __reg_5_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 10, __reg_5_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 10, __reg_5_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
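    /* At most four leftover rows; each step re-checks the overlapped tile
     * height and returns as soon as it is reached. */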
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 10, __reg_5_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 10, __reg_5_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 10, __reg_5_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 10, __reg_5_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 10, __reg_5_0);
__h++;
}
}
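/* Hypothetical host-side launch sketch (illustrative only: this generated file
 * does not contain its driver, and every name below is an assumption rather
 * than the generator's real API). It mirrors the index decoding inside
 * kernel0_5: blockIdx.x enumerates (dim-1 tile, dim-2 tile) pairs, and
 * blockDim.x covers one overlapped dim-2 tile, 492 + 2*2*5 = 512 threads. */
static inline void launch_kernel0_5(float *d_A, int dimsize, int timestep, int c0)
{
    const unsigned side1Len = 256, side2Len = 492, halo1 = 2, halo2 = 2, steps = 5;
    const unsigned c1Len = dimsize - 2 * halo1;  /* interior extent along dim 1 */
    const unsigned c2Len = dimsize - 2 * halo2;  /* interior extent along dim 2 */
    const unsigned side1Num = (c1Len + side1Len - 1) / side1Len;
    const unsigned side2Num = (c2Len + side2Len - 1) / side2Len;
    dim3 grid(side1Num * side2Num, 1, 1);
    dim3 block(side2Len + 2 * halo2 * steps, 1, 1);  /* == __side2LenOl == 512 */
    kernel0_5<<<grid, block>>>(d_A, dimsize, timestep, c0);
}
/* kernel0_4: same streaming scheme with four fused time steps (tile 256 x 496,
 * first interior store at pipeline depth 8); presumably emitted for time-step
 * remainders that are not a multiple of five. */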
__global__ void kernel0_4(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_2_3;
float __reg_2_4;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_3_3;
float __reg_3_4;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_4_3;
float __reg_4_4;
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __storeValid = __writeValid4;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = ((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1))) + (0.03127f * (__REGREF(__a, 0))) + (0.03128f * (__SBREF(__a_sb, 1))) + (0.03129f * (__SBREF(__a_sb, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = ((0.03130f * (__SBREF(__a_sb, -2))) + (0.03131f * (__SBREF(__a_sb, -1))) + (0.03132f * (__REGREF(__a, 0))) + (0.03133f * (__SBREF(__a_sb, 1))) + (0.03134f * (__SBREF(__a_sb, 2)))); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((0.03135f * (__SBREF(__a_sb, -2))) + (0.03136f * (__SBREF(__a_sb, -1))) + (0.24712f * (__REGREF(__a, 0))) + (0.03138f * (__SBREF(__a_sb, 1))) + (0.03139f * (__SBREF(__a_sb, 2)))); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = ((0.03140f * (__SBREF(__a_sb, -2))) + (0.03141f * (__SBREF(__a_sb, -1))) + (0.03142f * (__REGREF(__a, 0))) + (0.03143f * (__SBREF(__a_sb, 1))) + (0.03144f * (__SBREF(__a_sb, 2)))); } while (0)
#define __CALCEXPR_3(out, a) do { float etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((0.03145f * (__SBREF(__a_sb, -2))) + (0.03146f * (__SBREF(__a_sb, -1))) + (0.03147f * (__REGREF(__a, 0))) + (0.03148f * (__SBREF(__a_sb, 1))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __CALCEXPR_4(out, a) do { float etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC4(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
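    /* Same prologue / steady-state / epilogue structure as kernel0_5, one
     * pipeline stage shorter: the boundary block's first store is output row
     * 2, an interior block's is row 8. */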
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(2, __reg_4_2);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(3, __reg_4_3);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(4, __reg_4_4);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(5, __reg_4_0);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(6, __reg_4_1);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(7, __reg_4_2);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(8, __reg_4_3);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(8, __reg_4_3);
}
__a_sb = __a_sb_double + __blockSize * 0;
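// Steady state: stream the remaining rows of the tile. The last block along c1
// takes the boundary-draining branch below; interior blocks take the else path
// further down, which retires rows with early-return tails instead.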
if (__c1Id == __side1Num - 1)
{
for (__h = 17; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 8, __reg_4_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 8, __reg_4_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 8, __reg_4_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h - 8, __reg_4_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h - 8, __reg_4_3);
__h++;
}
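// Bottom-boundary drain: one generated branch per possible remainder of rows
// (__h + 2 .. __h + 6 left), each flushing the in-flight registers with the
// boundary variants of __CALCn; "if (0) {}" merely opens the else-if chain.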
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 8, __reg_4_4);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 7, __reg_4_0);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 6, __reg_4_1);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h - 5, __reg_4_2);
__reg_2_3 = __reg_1_3;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h - 4, __reg_4_3);
__reg_3_2 = __reg_2_2;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 3, __reg_4_4);
__reg_3_3 = __reg_2_3;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 2, __reg_4_0);
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_3);
__STORE(__h - 1, __reg_4_1);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 8, __reg_4_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 7, __reg_4_0);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 6, __reg_4_1);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h - 5, __reg_4_2);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h - 4, __reg_4_3);
__reg_2_4 = __reg_1_4;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 3, __reg_4_4);
__reg_3_3 = __reg_2_3;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 2, __reg_4_0);
__reg_3_4 = __reg_2_4;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 1, __reg_4_1);
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_4);
__STORE(__h + 0, __reg_4_2);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 8, __reg_4_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 7, __reg_4_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 6, __reg_4_1);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h - 5, __reg_4_2);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h - 4, __reg_4_3);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 3, __reg_4_4);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 2, __reg_4_0);
__reg_3_4 = __reg_2_4;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 1, __reg_4_1);
__reg_3_0 = __reg_2_0;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h + 0, __reg_4_2);
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_0);
__STORE(__h + 1, __reg_4_3);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 8, __reg_4_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 7, __reg_4_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 6, __reg_4_1);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h - 5, __reg_4_2);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h - 4, __reg_4_3);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 3, __reg_4_4);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 2, __reg_4_0);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 1, __reg_4_1);
__reg_3_0 = __reg_2_0;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h + 0, __reg_4_2);
__reg_3_1 = __reg_2_1;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h + 1, __reg_4_3);
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_1);
__STORE(__h + 2, __reg_4_4);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 8, __reg_4_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 7, __reg_4_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 6, __reg_4_1);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h - 5, __reg_4_2);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h - 4, __reg_4_3);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 3, __reg_4_4);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 2, __reg_4_0);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 1, __reg_4_1);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h + 0, __reg_4_2);
__reg_3_1 = __reg_2_1;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h + 1, __reg_4_3);
__reg_3_2 = __reg_2_2;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h + 2, __reg_4_4);
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_0, __reg_3_2);
__STORE(__h + 3, __reg_4_0);
}
}
else
{
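// Interior blocks: no boundary handling needed; the tail after the loop retires
// up to five more rows, returning as soon as __h reaches __side1LenOl.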
for (__h = 17; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 8, __reg_4_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 8, __reg_4_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 8, __reg_4_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h - 8, __reg_4_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h - 8, __reg_4_3);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 8, __reg_4_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 8, __reg_4_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 8, __reg_4_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h - 8, __reg_4_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h - 8, __reg_4_3);
__h++;
}
}
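/* kernel0_3: the same generated 5x5 (halo-2) single-precision stencil, fusing
   __side0Len = 3 time steps per launch. Three register pipelines (__reg_1_*,
   __reg_2_*, __reg_3_*) are chained so each loaded row passes through three
   stencil applications before being stored. */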
__global__ void kernel0_3(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_2_3;
float __reg_2_4;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_3_3;
float __reg_3_4;
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __storeValid = __writeValid3;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
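// Helper macros: __LOAD fetches one element of row h into a register;
// __CALCSETUP publishes it to the double-buffered shared-memory row;
// __CALCEXPR_0..4 form the five row-wise partial sums of the 5x5 stencil,
// written into the five rotating accumulators; __CALCn gates stage n on
// __writeValidn; __STORE commits a finished row to the output time plane.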
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __CALCEXPR_3(out, a) do { float etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __CALCEXPR_4(out, a) do { float etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
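// Warm-up: prime the three pipelines on rows 0..12. The first block along c1
// also stores rows 2..6 while absorbing the top boundary; other blocks emit
// only row 6.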
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(2, __reg_3_2);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(3, __reg_3_3);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(4, __reg_3_4);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(5, __reg_3_0);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(6, __reg_3_1);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(6, __reg_3_1);
}
__a_sb = __a_sb_double + __blockSize * 1;
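// Normalize the shared-memory double-buffer parity before the streaming loop;
// both warm-up paths above appear to leave the buffer on its second half.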
if (__c1Id == __side1Num - 1)
{
for (__h = 13; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 6, __reg_3_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h - 6, __reg_3_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h - 6, __reg_3_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h - 6, __reg_3_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(__h - 6, __reg_3_1);
__h++;
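// 3 stages x 5 rows gives 15 implicit __DB_SWITCH flips per iteration, an odd
// count, so flip once more and resynchronize to keep the buffer parity stable
// across iterations.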
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 6, __reg_3_2);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h - 5, __reg_3_3);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h - 4, __reg_3_4);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h - 3, __reg_3_0);
__reg_2_4 = __reg_1_4;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(__h - 2, __reg_3_1);
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4);
__STORE(__h - 1, __reg_3_2);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 6, __reg_3_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h - 5, __reg_3_3);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h - 4, __reg_3_4);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h - 3, __reg_3_0);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(__h - 2, __reg_3_1);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 1, __reg_3_2);
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0);
__STORE(__h + 0, __reg_3_3);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 6, __reg_3_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h - 5, __reg_3_3);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h - 4, __reg_3_4);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h - 3, __reg_3_0);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(__h - 2, __reg_3_1);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 1, __reg_3_2);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h + 0, __reg_3_3);
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1);
__STORE(__h + 1, __reg_3_4);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 6, __reg_3_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h - 5, __reg_3_3);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h - 4, __reg_3_4);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h - 3, __reg_3_0);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(__h - 2, __reg_3_1);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 1, __reg_3_2);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h + 0, __reg_3_3);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h + 1, __reg_3_4);
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2);
__STORE(__h + 2, __reg_3_0);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 6, __reg_3_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h - 5, __reg_3_3);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h - 4, __reg_3_4);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h - 3, __reg_3_0);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(__h - 2, __reg_3_1);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 1, __reg_3_2);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h + 0, __reg_3_3);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h + 1, __reg_3_4);
__reg_2_3 = __reg_1_3;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h + 2, __reg_3_0);
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3);
__STORE(__h + 3, __reg_3_1);
}
}
else
{
for (__h = 13; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 6, __reg_3_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h - 6, __reg_3_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h - 6, __reg_3_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h - 6, __reg_3_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(__h - 6, __reg_3_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 6, __reg_3_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h - 6, __reg_3_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h - 6, __reg_3_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h - 6, __reg_3_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(__h - 6, __reg_3_1);
__h++;
}
}
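/* kernel0_2: two fused time steps per launch (__side0Len = 2). The narrower
   halo overlap lets __side2Len grow to 504 while __side2LenOl stays 512, the
   same thread-block width as kernel0_3. */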
__global__ void kernel0_2(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_2_3;
float __reg_2_4;
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __storeValid = __writeValid2;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __CALCEXPR_3(out, a) do { float etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __CALCEXPR_4(out, a) do { float etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(2, __reg_2_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__STORE(3, __reg_2_3);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__STORE(4, __reg_2_4);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__STORE(4, __reg_2_4);
}
__a_sb = __a_sb_double + __blockSize * 0;
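// With two stages, each streaming-loop iteration performs 10 buffer flips, an
// even count, so no trailing __DB_SWITCH is needed; parity is simply reset to
// the first half here.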
if (__c1Id == __side1Num - 1)
{
for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h - 4, __reg_2_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h - 4, __reg_2_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(__h - 4, __reg_2_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__STORE(__h - 4, __reg_2_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__STORE(__h - 4, __reg_2_4);
__h++;
}
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h - 4, __reg_2_0);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h - 3, __reg_2_1);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(__h - 2, __reg_2_2);
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0);
__STORE(__h - 1, __reg_2_3);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h - 4, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h - 3, __reg_2_1);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(__h - 2, __reg_2_2);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0);
__STORE(__h - 1, __reg_2_3);
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1);
__STORE(__h + 0, __reg_2_4);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h - 4, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h - 3, __reg_2_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(__h - 2, __reg_2_2);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__STORE(__h - 1, __reg_2_3);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1);
__STORE(__h + 0, __reg_2_4);
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2);
__STORE(__h + 1, __reg_2_0);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h - 4, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h - 3, __reg_2_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(__h - 2, __reg_2_2);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__STORE(__h - 1, __reg_2_3);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__STORE(__h + 0, __reg_2_4);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h + 1, __reg_2_0);
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3);
__STORE(__h + 2, __reg_2_1);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h - 4, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h - 3, __reg_2_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(__h - 2, __reg_2_2);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__STORE(__h - 1, __reg_2_3);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__STORE(__h + 0, __reg_2_4);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h + 1, __reg_2_0);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h + 2, __reg_2_1);
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4);
__STORE(__h + 3, __reg_2_2);
}
}
else
{
for (__h = 9; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h - 4, __reg_2_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h - 4, __reg_2_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(__h - 4, __reg_2_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__STORE(__h - 4, __reg_2_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__STORE(__h - 4, __reg_2_4);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h - 4, __reg_2_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h - 4, __reg_2_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(__h - 4, __reg_2_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__STORE(__h - 4, __reg_2_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__STORE(__h - 4, __reg_2_4);
__h++;
}
}
__global__ void kernel0_1(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __storeValid = __writeValid1;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { float etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { float etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(2, __reg_1_2);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(2, __reg_1_2);
}
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 2, __reg_1_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 2, __reg_1_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 2, __reg_1_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h - 2, __reg_1_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__STORE(__h - 1, __reg_1_4);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 1, __reg_1_4);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 1, __reg_1_4);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 1, __reg_1_4);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 1, __reg_1_4);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__STORE(__h + 3, __reg_1_3);
}
}
else
{
for (__h = 5; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 2, __reg_1_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 2, __reg_1_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 2, __reg_1_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h - 2, __reg_1_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 2, __reg_1_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 2, __reg_1_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 2, __reg_1_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h - 2, __reg_1_2);
__h++;
}
}
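// --- Hedged explanatory sketch (not part of the generated file above) ---
// The AN5D-generated kernels pipeline a halo-2 stencil along dimension 1 by
// rotating five registers: each __LOAD/__CALC/__STORE round renames which
// register holds which row, and the store lags the load by the halo. A
// hand-written equivalent of one rotation step, with assumed helper names
// and the cross-row shared-memory terms elided, looks roughly like this:
/*
float r0, r1, r2, r3, r4;                 // five rows in flight, primed by five initial loads
for (int h = start; h < end; h++) {
    float in  = load_row(h);              // hypothetical helper
    float out = c0*r0 + c1*r1 + c2*r2 + c3*r3 + c4*in;
    store_row(h - 2, out);                // result lags the load by halo1 = 2
    r0 = r1; r1 = r2; r2 = r3; r3 = r4; r4 = in;   // rotate the window
}
*/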
|
d77eb0510cf044fc6d520ab3991a367b63110f1f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
__global__ void calculate_g_image_gpu(float* in, float* out, int w, int h){
int x = blockDim.x * blockIdx.x + threadIdx.x;
int j = x % w;
int i = x / w;
if (1 <= i && i < h - 1 && 1 <= j && j < w - 1) {  // interior only, so the i+1/i-1 and j+1/j-1 reads stay in bounds
float val = pow((in[(i+1)*w+j]-in[(i-1)*w+j])/2, 2) + pow((in[i*w+j+1]-in[i*w+j-1])/2, 2);
float lambda = 3.5;
out[i*w+j] = exp(-pow(val, 2)/2/pow(lambda, 2));
}
}
__global__ void copy_gpu(float* src, float* dest, int w, int h) {
int x = blockDim.x * blockIdx.x + threadIdx.x;
int j = x % w;
int i = x / w;
if (i < h)  // guard: a rounded-up grid launches threads past w*h
dest[i*w+j] = src[i*w+j];
}
__device__ float arithmetic_mean_gpu(float n1, float n2) {
return (n1+n2)/2.0;
}
__global__ void apply_stencil_gpu(float* in, float* out, float* g, int w, int h, float time_step) {
int x = blockDim.x * blockIdx.x + threadIdx.x;
int j = x % w;
int i = x / w;
if (1 <= i && i < h - 1 && 1 <= j && j < w - 1) {  // interior only, so the i+1/i-1 and j+1/j-1 reads stay in bounds
// the center weight must balance all four neighbor fluxes; the (i-1) term was missing
float val = in[i*w+j]*(1-time_step*(arithmetic_mean_gpu(g[i*w+j], g[(i+1)*w+j]) +
arithmetic_mean_gpu(g[i*w+j], g[(i-1)*w+j]) +
arithmetic_mean_gpu(g[i*w+j], g[i*w+j+1]) +
arithmetic_mean_gpu(g[i*w+j], g[i*w+j-1])));
val += in[(i+1)*w+j]*time_step*arithmetic_mean_gpu(g[i*w+j], g[(i+1)*w+j]);
val += in[(i-1)*w+j]*time_step*arithmetic_mean_gpu(g[i*w+j], g[(i-1)*w+j]);
val += in[i*w+j+1]*time_step*arithmetic_mean_gpu(g[i*w+j], g[i*w+j+1]);
val += in[i*w+j-1]*time_step*arithmetic_mean_gpu(g[i*w+j], g[i*w+j-1]);
val = (val < 0 ? 0 : val);
val = (val > 255 ? 255 : val);
out[i*w+j] = val;
}
}
void gpu_func(float* in, float* out, float* g_img, int w, int h, int n_iters){
int device = 0;
int n = w*h;
hipSetDevice(device);
float* in_dev;
float* out_dev;
float* g_dev;
hipMallocManaged(&in_dev, n * sizeof(float));
hipMallocManaged(&out_dev, n * sizeof(float));
hipMallocManaged(&g_dev, n * sizeof(float));
for (int i = 0; i < n; i++) in_dev[i] = in[i];
dim3 blockDim(16);
dim3 gridDim((n % blockDim.x) ? n / blockDim.x + 1 : n / blockDim.x); // round up when n is not a multiple of the block size
for (int t=0; t<n_iters; t++) {
hipLaunchKernelGGL(( calculate_g_image_gpu), dim3(gridDim), dim3(blockDim), 0, 0, in_dev, g_dev, w, h);
hipLaunchKernelGGL(( apply_stencil_gpu), dim3(gridDim), dim3(blockDim), 0, 0, in_dev, out_dev, g_dev, w, h, t);
hipLaunchKernelGGL(( copy_gpu), dim3(gridDim), dim3(blockDim), 0, 0, out_dev, in_dev, w, h);
}
hipDeviceSynchronize();
printf("Done executing CUDA kernel\n");
for (int i = 0; i < n; i++) out[i] = out_dev[i];
for (int i = 0; i < n; i++) g_img[i] = g_dev[i];
hipFree(in_dev);
hipFree(out_dev);
hipFree(g_dev);
}
|
d77eb0510cf044fc6d520ab3991a367b63110f1f.cu
|
#include <stdio.h>
#include <cuda.h>
__global__ void calculate_g_image_gpu(float* in, float* out, int w, int h){
int x = blockDim.x * blockIdx.x + threadIdx.x;
int j = x % w;
int i = x / w;
if (1 <= i && i < h - 1 && 1 <= j && j < w - 1) {  // interior only, so the i+1/i-1 and j+1/j-1 reads stay in bounds
float val = pow((in[(i+1)*w+j]-in[(i-1)*w+j])/2, 2) + pow((in[i*w+j+1]-in[i*w+j-1])/2, 2);
float lambda = 3.5;
out[i*w+j] = exp(-pow(val, 2)/2/pow(lambda, 2));
}
}
__global__ void copy_gpu(float* src, float* dest, int w, int h) {
int x = blockDim.x * blockIdx.x + threadIdx.x;
int j = x % w;
int i = x / w;
if (i < h)  // guard: a rounded-up grid launches threads past w*h
dest[i*w+j] = src[i*w+j];
}
__device__ float arithmetic_mean_gpu(float n1, float n2) {
return (n1+n2)/2.0;
}
__global__ void apply_stencil_gpu(float* in, float* out, float* g, int w, int h, float time_step) {
int x = blockDim.x * blockIdx.x + threadIdx.x;
int j = x % w;
int i = x / w;
if (1 <= i && i < h - 1 && 1 <= j && j < w - 1) {  // interior only, so the i+1/i-1 and j+1/j-1 reads stay in bounds
// the center weight must balance all four neighbor fluxes; the (i-1) term was missing
float val = in[i*w+j]*(1-time_step*(arithmetic_mean_gpu(g[i*w+j], g[(i+1)*w+j]) +
arithmetic_mean_gpu(g[i*w+j], g[(i-1)*w+j]) +
arithmetic_mean_gpu(g[i*w+j], g[i*w+j+1]) +
arithmetic_mean_gpu(g[i*w+j], g[i*w+j-1])));
val += in[(i+1)*w+j]*time_step*arithmetic_mean_gpu(g[i*w+j], g[(i+1)*w+j]);
val += in[(i-1)*w+j]*time_step*arithmetic_mean_gpu(g[i*w+j], g[(i-1)*w+j]);
val += in[i*w+j+1]*time_step*arithmetic_mean_gpu(g[i*w+j], g[i*w+j+1]);
val += in[i*w+j-1]*time_step*arithmetic_mean_gpu(g[i*w+j], g[i*w+j-1]);
val = (val < 0 ? 0 : val);
val = (val > 255 ? 255 : val);
out[i*w+j] = val;
}
}
void gpu_func(float* in, float* out, float* g_img, int w, int h, int n_iters){
int device = 0;
int n = w*h;
cudaSetDevice(device);
float* in_dev;
float* out_dev;
float* g_dev;
cudaMallocManaged(&in_dev, n * sizeof(float));
cudaMallocManaged(&out_dev, n * sizeof(float));
cudaMallocManaged(&g_dev, n * sizeof(float));
for (int i = 0; i < n; i++) in_dev[i] = in[i];
dim3 blockDim(16);
dim3 gridDim((n % blockDim.x) ? n / blockDim.x + 1 : n / blockDim.x); // round up when n is not a multiple of the block size
for (int t=0; t<n_iters; t++) {
calculate_g_image_gpu<<<gridDim, blockDim>>>(in_dev, g_dev, w, h);
apply_stencil_gpu<<<gridDim, blockDim>>>(in_dev, out_dev, g_dev, w, h, t);
copy_gpu<<<gridDim, blockDim>>>(out_dev, in_dev, w, h);
}
cudaDeviceSynchronize();
printf("Done executing CUDA kernel\n");
for (int i = 0; i < n; i++) out[i] = out_dev[i];
for (int i = 0; i < n; i++) g_img[i] = g_dev[i];
cudaFree(in_dev);
cudaFree(out_dev);
cudaFree(g_dev);
}
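// --- Hedged usage sketch (not part of the original file) ---
// A minimal, hypothetical driver for gpu_func above; the image size,
// iteration count, and the flat test image are assumptions, only
// gpu_func's signature comes from this file.
/*
#include <stdlib.h>
int main(void) {
    const int w = 256, h = 256, n_iters = 10;
    float *in    = (float*)malloc(w * h * sizeof(float));
    float *out   = (float*)malloc(w * h * sizeof(float));
    float *g_img = (float*)malloc(w * h * sizeof(float));
    for (int i = 0; i < w * h; i++) in[i] = 128.0f;   // flat gray test image
    gpu_func(in, out, g_img, w, h, n_iters);
    free(in); free(out); free(g_img);
    return 0;
}
*/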
|
05356c27c0e347781b82d9ea7eac80766b34d692.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
__global__ void vecAdd(float* A, float* B, float* C, int N);
int main(int argc, char **argv)
{
int i, N = 720896; /* default vector size */
float *A, *dev_a;
float *B, *dev_b;
float *C, *dev_c;
hipEvent_t begin, stop;
float rt;
/* check for user-supplied vector size */
if (argc > 1)
N = atoi(argv[1]);
printf("Running GPU vecAdd for %i elements\n", N);
/* allocate memory - host */
A = (float*)malloc(N * sizeof(float));
B = (float*)malloc(N * sizeof(float));
C = (float*)malloc(N * sizeof(float));
for (i = 0; i < N; i++) /* generate random data */
{
A[i] = (float)random();
B[i] = (float)RAND_MAX - A[i];
}
/* allocate memory - GPU */
hipError_t err;
err = hipMalloc((void**)&dev_a, N * sizeof(float));
if (err != hipSuccess)
{
fprintf(stderr, "hipMalloc ERROR : , %s.\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMalloc((void**)&dev_b, N * sizeof(float));
if (err != hipSuccess)
{
fprintf(stderr, "hipMalloc ERROR : , %s.\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMalloc((void**)&dev_c, N * sizeof(float));
if (err != hipSuccess)
{
fprintf(stderr, "hipMalloc ERROR : , %s.\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the data host -> GPU
hipMemcpy(dev_a, A, N * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_b, B, N * sizeof(float), hipMemcpyHostToDevice);
/* Create the timers and start 'begin' */
hipEventCreate(&begin);
hipEventCreate(&stop);
hipEventRecord(begin, 0);
/* Launch the kernel */
hipLaunchKernelGGL(( vecAdd), dim3((N + 511) / 512), dim3(512), 0, 0, dev_a, dev_b, dev_c, N); /* round up so N need not be a multiple of 512 */
// Copy the data GPU -> host
hipMemcpy(C, dev_c, N * sizeof(float), hipMemcpyDeviceToHost);
/* Stop the timer and measure the elapsed time between begin and stop */
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&rt, begin, stop); /* in milliseconds */
rt /= 1E3;
printf("time=%.4f seconds, MFLOPS=%.1f\n", rt, (float)N/rt/1E6);
/* Destroy the timers */
hipEventDestroy(begin);
hipEventDestroy(stop);
/* Print the first 10 results */
for (i = 0; i < 10; i++)
printf("C[%i]=%.2f\n", i, C[i]);
/* Free host memory */
free(A);
free(B);
free(C);
/* Free GPU memory */
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return EXIT_SUCCESS;
}
__global__ void vecAdd(float* A, float* B, float* C, int N)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) /* guard the tail threads of the rounded-up grid */
C[i] = A[i] + B[i];
}
|
05356c27c0e347781b82d9ea7eac80766b34d692.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
__global__ void vecAdd(float* A, float* B, float* C, int N);
int main(int argc, char **argv)
{
int i, N = 720896; /* default vector size */
float *A, *dev_a;
float *B, *dev_b;
float *C, *dev_c;
cudaEvent_t begin, stop;
float rt;
/* check for user-supplied vector size */
if (argc > 1)
N = atoi(argv[1]);
printf("Running GPU vecAdd for %i elements\n", N);
/* allocate memory - host */
A = (float*)malloc(N * sizeof(float));
B = (float*)malloc(N * sizeof(float));
C = (float*)malloc(N * sizeof(float));
for (i = 0; i < N; i++) /* generate random data */
{
A[i] = (float)random();
B[i] = (float)RAND_MAX - A[i];
}
/* allocate memory - GPU */
cudaError_t err;
err = cudaMalloc((void**)&dev_a, N * sizeof(float));
if (err != cudaSuccess)
{
fprintf(stderr, "cudaMalloc ERROR : , %s.\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMalloc((void**)&dev_b, N * sizeof(float));
if (err != cudaSuccess)
{
fprintf(stderr, "cudaMalloc ERROR : , %s.\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMalloc((void**)&dev_c, N * sizeof(float));
if (err != cudaSuccess)
{
fprintf(stderr, "cudaMalloc ERROR : , %s.\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the data host -> GPU
cudaMemcpy(dev_a, A, N * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, B, N * sizeof(float), cudaMemcpyHostToDevice);
/* Create the timers and start 'begin' */
cudaEventCreate(&begin);
cudaEventCreate(&stop);
cudaEventRecord(begin, 0);
/* Launch the kernel */
vecAdd<<<N/512, 512>>>(dev_a, dev_b, dev_c);
// Copy the data GPU -> host
cudaMemcpy(C, dev_c, N * sizeof(float), cudaMemcpyDeviceToHost);
/* Stop the timer and measure the elapsed time between begin and stop */
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&rt, begin, stop); /* in milliseconds */
rt /= 1E3;
printf("time=%.4f seconds, MFLOPS=%.1f\n", rt, (float)N/rt/1E6);
/* Destroy the timers */
cudaEventDestroy(begin);
cudaEventDestroy(stop);
/* Print the first 10 results */
for (i = 0; i < 10; i++)
printf("C[%i]=%.2f\n", i, C[i]);
/* Free host memory */
free(A);
free(B);
free(C);
/* Free GPU memory */
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return EXIT_SUCCESS;
}
__global__ void vecAdd(float* A, float* B, float* C, int N)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) /* guard the tail threads of the rounded-up grid */
C[i] = A[i] + B[i];
}
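// --- Hedged sketch (not part of the original program) ---
// The three cudaMalloc checks above repeat one pattern; a small macro is a
// common way to factor it out. CUDA_CHECK is an assumed name, not an API.
/*
#define CUDA_CHECK(call)                                            \
    do {                                                            \
        cudaError_t e = (call);                                     \
        if (e != cudaSuccess) {                                     \
            fprintf(stderr, "CUDA error %s at %s:%d\n",             \
                    cudaGetErrorString(e), __FILE__, __LINE__);     \
            exit(EXIT_FAILURE);                                     \
        }                                                           \
    } while (0)
// usage: CUDA_CHECK(cudaMalloc((void**)&dev_a, N * sizeof(float)));
*/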
|
c4a6b6ac0db6812b3485559903634658e168665b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kReciprocal(float* gData, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
for (unsigned int i = idx; i < numElements; i += blockDim.x * gridDim.x)
target[i] = 1 / gData[i];
}
|
c4a6b6ac0db6812b3485559903634658e168665b.cu
|
#include "includes.h"
__global__ void kReciprocal(float* gData, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
for (unsigned int i = idx; i < numElements; i += blockDim.x * gridDim.x)
target[i] = 1 / gData[i];
}
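// --- Hedged usage sketch (not part of the original file) ---
// kReciprocal uses a grid-stride loop, so any grid size covers all
// numElements; the block/grid sizes below are illustrative assumptions.
/*
void launch_reciprocal(float* d_in, float* d_out, unsigned int n) {
    const unsigned int block = 256;
    const unsigned int grid  = (n + block - 1) / block;  // any smaller grid also works
    kReciprocal<<<grid, block>>>(d_in, d_out, n);
    cudaDeviceSynchronize();                             // wait for the result
}
*/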
|
49296cb6d3c109622781e1294667031081a8d068.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cusparse_v2.h"
#include "device_launch_parameters.h"
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <vector>
#include <cassert>
#define imin(a,b) (a<b?a:b)
#define imax(a,b) (a>b?a:b)
const int EFDN = 10;
void efdEuroCallPrice_cuda(float S, float sigma, float tau, float K, float r);
void efdForward_1d_BM_cuda(float S, float tau);
void efdForward_2d_BM_cuda(float S, float V, float tau);
void example2dArray();
void ifdEuroCallPrice_cuda(double S0, double sigma, double tau, double K, double r);
__global__ void EFD(
int size,
float *d_val_n,
float *d_val_npo,
float Pu, float Pm, float Pd,
float x0, float x
);
__global__ void EFD_1dBM(
int size,
float *d_val_n,
float *d_val_npo,
float Pu, float Pm, float Pd
);
__global__ void EFD_2dBM(
int width, int height, int pitch_n, int pitch_npo,
float *d_val_n,
float *d_val_npo,
float alpha, float beta
);
__global__ void modify_i_j(
int width, int height, int pitch,
float *d_array,
int i, int j, float change_to
);
__global__ void IFD_boundary(
int size,
double *d_Price,
double lambda_U,
double lambda_L
);
int main(void) {
//// Example 1: forward 1D BM
//efdForward_1d_BM_cuda(100, 2);
//// Example 2: backward 1d BS
float S0 = 358.52, sigma = 0.230967, tau = 0.145205, K = 360.0, r = 0.06;
efdEuroCallPrice_cuda(S0, sigma, tau, K, r);
// Example 3: 2D array example
//example2dArray();
// Example 4: forward 2D BM
//efdForward_2d_BM_cuda(100, 100, 2);
// Example 5: backward 1D BS
//float S0 = 358.52, sigma = 0.230967, tau = 0.145205, K = 360.0, r = 0.06;
ifdEuroCallPrice_cuda(S0, sigma, tau, K, r);
}
void efdForward_2d_BM_cuda(float S, float V, float tau){
//construct the 2D array
float ST_max = S + 4 * sqrt(tau);
const int width = 2 * EFDN + 1;
float s = (ST_max - S) / (EFDN + 0.0);
float VT_max = V + 4 * sqrt(tau);
const int height = 2 * EFDN + 1;
float v = (VT_max - V) / (EFDN + 0.0);
float h_P0[width][height];
//initial density:
for (int i = 0; i < width; i++){
for (int j = 0; j < height; j++){
h_P0[i][j] = 0.0;
}
}
h_P0[EFDN][EFDN] = 1.0;
//time step
int n = 100;
float t = tau / n;
//coefficients from the PDE:
float alpha = t / 2.0 / s / s;
float beta = t / 2.0 / v / v;
//pass the 2D grid to device
//what is pitch? http://stackoverflow.com/questions/16119943/how-and-when-should-i-use-pitched-pointer-with-the-cuda-api
size_t h_pitch = width * sizeof(float); //host original array pitch in bytes, number of bytes in one row
size_t d_pitch0, d_pitch1;// pitch for the device array
float *d_ptr0, *d_ptr1;
hipMallocPitch(&d_ptr0, &d_pitch0, width * sizeof(float), height);
hipMallocPitch(&d_ptr1, &d_pitch1, width * sizeof(float), height);
hipMemcpy2D(d_ptr0, d_pitch0, h_P0, h_pitch, width * sizeof(float), height, hipMemcpyHostToDevice);
hipMemcpy2D(d_ptr1, d_pitch1, h_P0, h_pitch, width * sizeof(float), height, hipMemcpyHostToDevice);
//calculate forward
for (int i = 0; i < n; i++)
{
if (i % 2 == 0)
{
EFD_2dBM << < height, width >> >(
width, height, d_pitch0, d_pitch1,
d_ptr0,
d_ptr1,
alpha, beta
);
}
else
{
EFD_2dBM << <height, width >> >(
width, height, d_pitch1, d_pitch0,
d_ptr1,
d_ptr0,
alpha, beta);
}
}
//copy the result back to the host
if ((n - 1) % 2 == 0){
//hipMemcpy(h_P0, d_P_1, N * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy2D(h_P0, h_pitch, d_ptr1, d_pitch1, width * sizeof(float), height, hipMemcpyDeviceToHost);
}
else
{
hipMemcpy2D(h_P0, h_pitch, d_ptr0, d_pitch0, width * sizeof(float), height, hipMemcpyDeviceToHost);
}
//output the result
for (int i = 0; i < height; i++)
{
for (int j = 0; j < width; j++)
{
//std::cout << h_P0[i][j] << "\t";
printf("%.2f\t", h_P0[i][j]);
}
std::cout << std::endl;
}
}
__global__ void EFD_2dBM(
int width, int height, int pitch_n, int pitch_npo,
float *d_val_n,
float *d_val_npo,
float alpha, float beta
){
int idx = blockIdx.x; //row
int idy = threadIdx.x; //column
if ((idx < height) && (idy <width)){
//d_val_npo[i] = Pu * d_val_n[i + 1] + Pm * d_val_n[i] + Pd * d_val_n[i - 1];
d_val_npo[idx*(pitch_npo / sizeof(float)) + idy] = alpha*(d_val_n[(idx + 1)*(pitch_n / sizeof(float)) + idy]
+ d_val_n[(idx - 1)*(pitch_n / sizeof(float)) + idy])
+ beta*(d_val_n[idx*(pitch_n / sizeof(float)) + idy + 1]
+ d_val_n[idx*(pitch_n / sizeof(float)) + idy - 1])
+ (1.0 - 2.0*alpha - 2.0*beta)*d_val_n[idx*(pitch_n / sizeof(float)) + idy];
//modify the ones on the top
if (idx == 0){
d_val_npo[idx*(pitch_npo / sizeof(float)) + idy] = d_val_npo[(idx + 1)*(pitch_npo / sizeof(float)) + idy];
}
//modify the ones on the bottom
if (idx == (height - 1)){
d_val_npo[idx*(pitch_npo / sizeof(float)) + idy] = d_val_npo[(idx - 1)*(pitch_npo / sizeof(float)) + idy];
}
//modify the ones on the left (copy from the next column in the same row)
if (idy == 0){
d_val_npo[idx*(pitch_npo / sizeof(float)) + idy] = d_val_npo[idx*(pitch_npo / sizeof(float)) + idy + 1];
}
//modify the ones on the right (the original tested idx and read from the row above)
if (idy == (width - 1)){
d_val_npo[idx*(pitch_npo / sizeof(float)) + idy] = d_val_npo[idx*(pitch_npo / sizeof(float)) + idy - 1];
}
}
}
void example2dArray(){
std::cout << "Host main" << std::endl;
// Host code
const int width = 3;
const int height = 3;
float* devPtr;
float a[width][height];
//load and display input array
std::cout << "a array: " << std::endl;
for (int i = 0; i < width; i++)
{
for (int j = 0; j < height; j++)
{
a[i][j] = i + j;
std::cout << a[i][j] << " ";
}
std::cout << std::endl;
}
std::cout << std::endl;
//Allocating Device memory for 2D array using pitch
size_t host_orig_pitch = width * sizeof(float); //host original array pitch in bytes
size_t pitch;// pitch for the device array
hipMallocPitch(&devPtr, &pitch, width * sizeof(float), height);
std::cout << "host_orig_pitch: " << host_orig_pitch << std::endl;
std::cout << "sizeof(float): " << sizeof(float) << std::endl;
std::cout << "width: " << width << std::endl;
std::cout << "height: " << height << std::endl;
std::cout << "pitch: " << pitch << std::endl;
std::cout << std::endl;
hipMemcpy2D(devPtr, pitch, a, host_orig_pitch, width * sizeof(float), height, hipMemcpyHostToDevice);
float b[width][height];
//load b and display array
std::cout << "b array: " << std::endl;
for (int i = 0; i < width; i++)
{
for (int j = 0; j < height; j++)
{
b[i][j] = 0;
std::cout << b[i][j] << " ";
}
std::cout << std::endl;
}
std::cout << std::endl;
//MyKernel<<<100, 512>>>(devPtr, pitch, width, height);
//hipDeviceSynchronize();
modify_i_j << <height, width >> >(width, height, pitch, devPtr, 1, 1, 5); //one block for one row
hipDeviceSynchronize();
//cudaMemcpy2d(dst, dPitch,src ,sPitch, width, height, typeOfCopy )
hipMemcpy2D(b, host_orig_pitch, devPtr, pitch, width * sizeof(float), height, hipMemcpyDeviceToHost);
// should be filled in with the values of array a.
std::cout << "returned array" << std::endl;
for (int i = 0; i < width; i++){
for (int j = 0; j < height; j++){
std::cout << b[i][j] << " ";
}
std::cout << std::endl;
}
std::cout << std::endl;
system("pause");
}
__global__ void modify_i_j(
int width, int height, int pitch,
float *d_array,
int i, int j, float change_to
){
//we want to change the [i,j]-th of the 2-dim array
int idx = blockIdx.x; //row
int idy = threadIdx.x; //column
//we can do index by pointer:
//if ((idx == i) && (idy == j)){
//float* row = (float *)((char*)d_array + idx*pitch);
// row[idy] = change_to;
//}
//or, a more convenient way is to do index just use idx and idy
if ((idx == i) && (idy == j))
{
d_array[idx*(pitch / sizeof(float)) + idy] = change_to;
}
}
void efdForward_1d_BM_cuda(float S, float tau){
//construct a proper grid for S
float ST_max = S + 4 * sqrt(tau);
int N = 2 * EFDN + 1;
float s = (ST_max - S) / (EFDN + 0.0);
float *h_P0;
h_P0 = new float[N];
//initialize the initial density
for (int i = 0; i < N; i++){
if (i == EFDN){
h_P0[i] = 1.0;//a point mass at S
}
else{
h_P0[i] = 0.0;
}
}
//time step
int n = 100;
float t = tau / n;
//coefficients from the PDE:
float pu = t / 2.0 / s / s;
float pd = t / 2.0 / s / s;
float pm = 1.0 - t / s / s;
//pass the grid to device:
float *d_P_0, *d_P_1; // Device Pointers
hipMalloc((void**)&d_P_0, N * sizeof(float));
hipMalloc((void**)&d_P_1, N * sizeof(float));
hipMemcpy(d_P_0, h_P0, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_P_1, h_P0, N*sizeof(float), hipMemcpyHostToDevice);
// forward:
for (int i = 0; i < n; i++)
{
if (i % 2 == 0)
{
EFD_1dBM << < 2, 40 >> >(
N,
d_P_0,
d_P_1,
pu, pm, pd
);
}
else
{
EFD_1dBM << <2, 40 >> >(
N,
d_P_1,
d_P_0, pu, pm, pd);
}
}
if ((n - 1) % 2 == 0){
hipMemcpy(h_P0, d_P_1, N * sizeof(float), hipMemcpyDeviceToHost);
}
else
{
hipMemcpy(h_P0, d_P_0, N * sizeof(float), hipMemcpyDeviceToHost);
}
std::cout << "the terminal density is:" << std::endl;
for (int i = 0; i < N; i++){
std::cout << S - EFDN*s + i*s << ": " << h_P0[i] << std::endl;
}
}
__global__ void EFD_1dBM(
int size,
float *d_val_n,
float *d_val_npo,
float Pu, float Pm, float Pd
){
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < size)
{
d_val_npo[i] = Pu * d_val_n[i + 1] + Pm * d_val_n[i] + Pd * d_val_n[i - 1];
if (i == 0)
{
d_val_npo[i] = d_val_npo[1];
}
else if (i == size - 1)
{
d_val_npo[i] = d_val_npo[i - 1];
}
}
}
__global__ void EFD(
int size,
float *d_val_n,
float *d_val_npo,
float Pu, float Pm, float Pd,
float x0, float x
)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < size)
{
d_val_npo[i] = Pu * d_val_n[i + 1] + Pm * d_val_n[i] + Pd * d_val_n[i - 1];
if (i == 0)
{
d_val_npo[i] = d_val_npo[1];
}
else if (i == size - 1)
{
d_val_npo[i] = d_val_npo[i - 1]
+ exp(x0 + x * (float(i / 2)))
- exp(x0 + x * (float(i / 2 - 1)));
}
}
}
void efdEuroCallPrice_cuda(float S0, float sigma, float tau, float K, float r){
//calculate all parameters in CPU
float Tolerance = .001;
float t = Tolerance / (1 + 3 * sigma*sigma);
int n = tau / t;//n time intervals horizontally
float x = sigma*sqrt(3 * t);
int myN = 4 * sigma*sqrt(tau) / x;// 2N+1 possible values vertically
std::cout << "myN=" << myN << std::endl;
std::cout << "n=" << n << std::endl;
float nu = r - .5*sigma*sigma;
float disc = exp(-r*t);//discount factor
float Pu = (sigma*sigma*t) / (2 * x*x) + (nu*t) / (2 * x);
float Pd = (sigma*sigma*t) / (2 * x*x) - (nu*t) / (2 * x);
float Pm = 1 - Pu - Pd;
Pu = Pu*disc;
Pm = Pm*disc;
Pd = Pd*disc;
float x0 = log(S0);
int SIZEOFARR = 2 * EFDN + 1;
// START NEW CODE
float *h_Price; // Host Pointer
float *d_Price_0, *d_Price_1; // Device Pointers
/* Generate Terminal Conditions in h_Price */
h_Price = new float[SIZEOFARR];
for (int i = 0; i < SIZEOFARR; i++){
float myx = x0 + x* (i + 1 - EFDN - 1);
float myS = exp(myx);
h_Price[i] = imax(0.0, myS - K);
//std::cout << "h[" << i << "]=" << h_Price[i] << std::endl;
}
/* allocate device buffers and load the terminal condition */
hipMalloc((void**)&d_Price_0, SIZEOFARR * sizeof(float));
hipMalloc((void**)&d_Price_1, SIZEOFARR * sizeof(float));
hipMemcpy(d_Price_0, h_Price, SIZEOFARR*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_Price_1, h_Price, SIZEOFARR*sizeof(float), hipMemcpyHostToDevice);
for (int i = n - 1; i >= 0; i--)
{
if (i % 2 == 0)
{
EFD << < 2, 40 >> >(
SIZEOFARR,
d_Price_0,
d_Price_1,
Pu, Pm, Pd, x0, x
);
}
else
{
EFD << <2, 40 >> >(
SIZEOFARR,
d_Price_1,
d_Price_0, Pu, Pm, Pd, x0, x);
}
}
hipMemcpy(h_Price, d_Price_1, SIZEOFARR * sizeof(float), hipMemcpyDeviceToHost);
std::cout << "the efd price from device is " << h_Price[EFDN] << std::endl;
hipFree(d_Price_0);
hipFree(d_Price_1);
delete[] h_Price;
}
void ifdEuroCallPrice_cuda(double S0, double sigma, double tau, double K, double r){
double Tolerance = .001;
double x = sqrt(Tolerance / 2.0);
double t = Tolerance / 2.0;
int n = tau / t;//n time intervals horizontally
int N = 4 * sigma*sqrt(tau) / x;
//int N = 100;
//std::cout << "in ifd, N=" << N << std::endl;
// 2N+1 possible values vertically
// cout<< "N="<<N<<endl<<"n="<<n<<endl;
double nu; nu = r - .5*sigma*sigma;
double alpha = -.5*t*(sigma*sigma / x / x + nu / x);
double beta = 1.0 + t*sigma*sigma / x / x + r*t;
double gamma = -.5*t*(sigma*sigma / x / x - nu / x);
/*
std::cout<<"alpha="<<alpha<<std::endl
<<"beta="<<beta<<std::endl
<<"gamma="<<gamma<<std::endl
<<alpha+beta<<std::endl;
*/
double x0; x0 = log(S0);
const int SIZEOFARR = 2 * EFDN + 1;
// set up the 3 vectors of the tridiagonal matrix
double *h_dl = (double*)malloc(SIZEOFARR*sizeof(double));
double *h_d = (double*)malloc(SIZEOFARR*sizeof(double));
double *h_du = (double*)malloc(SIZEOFARR*sizeof(double));
for (int i = 0; i < SIZEOFARR; i++){
if (i == 0){
//first row
h_dl[i] = 0.0;
h_d[i] = 1.0;
h_du[i] = -1.0;
}
else if (i == (SIZEOFARR - 1)){
//last row
h_dl[i] = 1.0;
h_d[i] = -1.0;
h_du[i] = 0.0;
}
else{
//other rows
h_dl[i] = alpha;
h_d[i] = beta;
h_du[i] = gamma;
}
}
double *d_dl; hipMalloc(&d_dl, SIZEOFARR*sizeof(double));
double *d_d; hipMalloc(&d_d, SIZEOFARR*sizeof(double));
double *d_du; hipMalloc(&d_du, SIZEOFARR*sizeof(double));
hipMemcpy(d_dl, h_dl, SIZEOFARR*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_d, h_d, SIZEOFARR*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_du, h_du, SIZEOFARR*sizeof(double), hipMemcpyHostToDevice);
/* Generate Terminal Conditions in h_Price */
double *h_Price = (double *)malloc(SIZEOFARR*sizeof(double));
//h_Price = new double[SIZEOFARR];
for (int i = 0; i < SIZEOFARR; i++){
double myx = x0 + x* ( EFDN - i );
double myS = exp(myx);
h_Price[i] = imax(0.0, myS - K);
}
double * d_Price; hipMalloc(&d_Price, SIZEOFARR*sizeof(double));
hipMemcpy(d_Price, h_Price, SIZEOFARR*sizeof(double), hipMemcpyHostToDevice);
double lambda_U = exp(x0 + x*EFDN) - exp(x0 + x*(EFDN - 1));
double lambda_L = 0.0;
//initialize cuSPARSE
hipsparseHandle_t handle; hipsparseCreate(&handle);
// sequential backward to the initial step, each step solves a tridiagonal system using cusparseSgtsv_nopivot
for (int i = n-1 ; i >= 0; i--){
//cusparseSgtsv_nopivot(hipsparseHandle_t handle, int m, int n, const double *dl, const double *d, const double *du, double *B, int ldb)
// a good example:
// https://github.com/OrangeOwlSolutions/Linear-Algebra/blob/master/SolveTridiagonalLinearSystem.cu
IFD_boundary << <2, 40 >> >(SIZEOFARR, d_Price, lambda_U, lambda_L);
hipsparseDgtsv(handle, SIZEOFARR, 1, d_dl, d_d, d_du, d_Price, SIZEOFARR); // assuming the hipSPARSE gtsv counterpart; newer releases ship hipsparseDgtsv2 with a work buffer
}
// get the middle one as the resulting price
hipMemcpy(h_Price, d_Price, SIZEOFARR * sizeof(double), hipMemcpyDeviceToHost);
std::cout << "the ifd price from device is " << h_Price[EFDN] << std::endl;
/*for (int i = 0; i < SIZEOFARR; i++){
std::cout << "h[" << i << "]=" << h_Price[i] << std::endl;
}*/
hipsparseDestroy(handle);
hipFree(d_dl); hipFree(d_d); hipFree(d_du); hipFree(d_Price);
free(h_dl); free(h_d); free(h_du);
free(h_Price); // h_Price came from malloc, so free(), not delete[]
}
__global__ void IFD_boundary(
int size,
double *d_Price,
double lambda_U,
double lambda_L
)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < size)
{
if (i == 0)//top condition
{
d_Price[i] = lambda_U;
}
else if (i == size - 1) //bottom condition
{
d_Price[i] = lambda_L; // use the lower boundary passed in (0.0 for this call)
}
}
}
|
49296cb6d3c109622781e1294667031081a8d068.cu
|
#include "cuda_runtime.h"
#include "cusparse_v2.h"
#include "device_launch_parameters.h"
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <vector>
#include <cassert>
#define imin(a,b) (a<b?a:b)
#define imax(a,b) (a>b?a:b)
const int EFDN = 10;
void efdEuroCallPrice_cuda(float S, float sigma, float tau, float K, float r);
void efdForward_1d_BM_cuda(float S, float tau);
void efdForward_2d_BM_cuda(float S, float V, float tau);
void example2dArray();
void ifdEuroCallPrice_cuda(double S0, double sigma, double tau, double K, double r);
__global__ void EFD(
int size,
float *d_val_n,
float *d_val_npo,
float Pu, float Pm, float Pd,
float x0, float x
);
__global__ void EFD_1dBM(
int size,
float *d_val_n,
float *d_val_npo,
float Pu, float Pm, float Pd
);
__global__ void EFD_2dBM(
int width, int height, int pitch_n, int pitch_npo,
float *d_val_n,
float *d_val_npo,
float alpha, float beta
);
__global__ void modify_i_j(
int width, int height, int pitch,
float *d_array,
int i, int j, float change_to
);
__global__ void IFD_boundary(
int size,
double *d_Price,
double lambda_U,
double lambda_L
);
int main(void) {
//// Example 1: forward 1D BM
//efdForward_1d_BM_cuda(100, 2);
//// Example 2: backward 1d BS
float S0 = 358.52, sigma = 0.230967, tau = 0.145205, K = 360.0, r = 0.06;
efdEuroCallPrice_cuda(S0, sigma, tau, K, r);
// Example 3: 2D array example
//example2dArray();
// Example 4: forward 2D BM
//efdForward_2d_BM_cuda(100, 100, 2);
// Example 5: backward 1D BS
//float S0 = 358.52, sigma = 0.230967, tau = 0.145205, K = 360.0, r = 0.06;
ifdEuroCallPrice_cuda(S0, sigma, tau, K, r);
}
void efdForward_2d_BM_cuda(float S, float V, float tau){
//construct the 2D array
float ST_max = S + 4 * sqrt(tau);
const int width = 2 * EFDN + 1;
float s = (ST_max - S) / (EFDN + 0.0);
float VT_max = V + 4 * sqrt(tau);
const int height = 2 * EFDN + 1;
float v = (VT_max - V) / (EFDN + 0.0);
float h_P0[width][height];
//initial density:
for (int i = 0; i < width; i++){
for (int j = 0; j < height; j++){
h_P0[i][j] = 0.0;
}
}
h_P0[EFDN][EFDN] = 1.0;
//time step
int n = 100;
float t = tau / n;
//coefficients from the PDE:
float alpha = t / 2.0 / s / s;
float beta = t / 2.0 / v / v;
//pass the 2D grid to device
//what is pitch? http://stackoverflow.com/questions/16119943/how-and-when-should-i-use-pitched-pointer-with-the-cuda-api
size_t h_pitch = width * sizeof(float); //host original array pitch in bytes, number of bytes in one row
size_t d_pitch0, d_pitch1;// pitch for the device array
float *d_ptr0, *d_ptr1;
cudaMallocPitch(&d_ptr0, &d_pitch0, width * sizeof(float), height);
cudaMallocPitch(&d_ptr1, &d_pitch1, width * sizeof(float), height);
cudaMemcpy2D(d_ptr0, d_pitch0, h_P0, h_pitch, width * sizeof(float), height, cudaMemcpyHostToDevice);
cudaMemcpy2D(d_ptr1, d_pitch1, h_P0, h_pitch, width * sizeof(float), height, cudaMemcpyHostToDevice);
//calculate forward
for (int i = 0; i < n; i++)
{
if (i % 2 == 0)
{
EFD_2dBM << < height, width >> >(
width, height, d_pitch0, d_pitch1,
d_ptr0,
d_ptr1,
alpha, beta
);
}
else
{
EFD_2dBM << <height, width >> >(
width, height, d_pitch1, d_pitch0,
d_ptr1,
d_ptr0,
alpha, beta);
}
}
//copy the result back to the host
if ((n - 1) % 2 == 0){
//cudaMemcpy(h_P0, d_P_1, N * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy2D(h_P0, h_pitch, d_ptr1, d_pitch1, width * sizeof(float), height, cudaMemcpyDeviceToHost);
}
else
{
cudaMemcpy2D(h_P0, h_pitch, d_ptr0, d_pitch0, width * sizeof(float), height, cudaMemcpyDeviceToHost);
}
//output the result
for (int i = 0; i < height; i++)
{
for (int j = 0; j < width; j++)
{
//std::cout << h_P0[i][j] << "\t";
printf("%.2f\t", h_P0[i][j]);
}
std::cout << std::endl;
}
}
__global__ void EFD_2dBM(
int width, int height, int pitch_n, int pitch_npo,
float *d_val_n,
float *d_val_npo,
float alpha, float beta
){
int idx = blockIdx.x; //row
int idy = threadIdx.x; //column
if ((idx < height) && (idy <width)){
//d_val_npo[i] = Pu * d_val_n[i + 1] + Pm * d_val_n[i] + Pd * d_val_n[i - 1];
d_val_npo[idx*(pitch_npo / sizeof(float)) + idy] = alpha*(d_val_n[(idx + 1)*(pitch_n / sizeof(float)) + idy]
+ d_val_n[(idx - 1)*(pitch_n / sizeof(float)) + idy])
+ beta*(d_val_n[idx*(pitch_n / sizeof(float)) + idy + 1]
+ d_val_n[idx*(pitch_n / sizeof(float)) + idy - 1])
+ (1.0 - 2.0*alpha - 2.0*beta)*d_val_n[idx*(pitch_n / sizeof(float)) + idy];
//modify the ones on the top
if (idx == 0){
d_val_npo[idx*(pitch_npo / sizeof(float)) + idy] = d_val_npo[(idx + 1)*(pitch_npo / sizeof(float)) + idy];
}
//modify the ones on the bottom
if (idx == (height - 1)){
d_val_npo[idx*(pitch_npo / sizeof(float)) + idy] = d_val_npo[(idx - 1)*(pitch_npo / sizeof(float)) + idy];
}
//modify the ones on the left (copy from the next column in the same row)
if (idy == 0){
d_val_npo[idx*(pitch_npo / sizeof(float)) + idy] = d_val_npo[idx*(pitch_npo / sizeof(float)) + idy + 1];
}
//modify the ones on the right (the original tested idx and read from the row above)
if (idy == (width - 1)){
d_val_npo[idx*(pitch_npo / sizeof(float)) + idy] = d_val_npo[idx*(pitch_npo / sizeof(float)) + idy - 1];
}
}
}
void example2dArray(){
std::cout << "Host main" << std::endl;
// Host code
const int width = 3;
const int height = 3;
float* devPtr;
float a[width][height];
//load and display input array
std::cout << "a array: " << std::endl;
for (int i = 0; i < width; i++)
{
for (int j = 0; j < height; j++)
{
a[i][j] = i + j;
std::cout << a[i][j] << " ";
}
std::cout << std::endl;
}
std::cout << std::endl;
//Allocating Device memory for 2D array using pitch
size_t host_orig_pitch = width * sizeof(float); //host original array pitch in bytes
size_t pitch;// pitch for the device array
cudaMallocPitch(&devPtr, &pitch, width * sizeof(float), height);
std::cout << "host_orig_pitch: " << host_orig_pitch << std::endl;
std::cout << "sizeof(float): " << sizeof(float) << std::endl;
std::cout << "width: " << width << std::endl;
std::cout << "height: " << height << std::endl;
std::cout << "pitch: " << pitch << std::endl;
std::cout << std::endl;
cudaMemcpy2D(devPtr, pitch, a, host_orig_pitch, width * sizeof(float), height, cudaMemcpyHostToDevice);
float b[width][height];
//load b and display array
std::cout << "b array: " << std::endl;
for (int i = 0; i < width; i++)
{
for (int j = 0; j < height; j++)
{
b[i][j] = 0;
std::cout << b[i][j] << " ";
}
std::cout << std::endl;
}
std::cout << std::endl;
//MyKernel<<<100, 512>>>(devPtr, pitch, width, height);
//cudaThreadSynchronize();
modify_i_j << <height, width >> >(width, height, pitch, devPtr, 1, 1, 5); //one block for one row
cudaDeviceSynchronize(); // cudaThreadSynchronize() is deprecated
//cudaMemcpy2d(dst, dPitch,src ,sPitch, width, height, typeOfCopy )
cudaMemcpy2D(b, host_orig_pitch, devPtr, pitch, width * sizeof(float), height, cudaMemcpyDeviceToHost);
// should be filled in with the values of array a.
std::cout << "returned array" << std::endl;
for (int i = 0; i < width; i++){
for (int j = 0; j < height; j++){
std::cout << b[i][j] << " ";
}
std::cout << std::endl;
}
std::cout << std::endl;
system("pause");
}
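// --- Hedged sketch (not part of the original file) ---
// The indexing used above, d_array[idx*(pitch/sizeof(float)) + idy], works
// because cudaMallocPitch returns a pitch that is a multiple of the element
// alignment; the byte-offset form below is the fully general equivalent.
// The helper name pitched_row is an assumption for illustration.
/*
__device__ inline float* pitched_row(float* base, size_t pitch, int row) {
    return (float*)((char*)base + (size_t)row * pitch);  // pitch is in bytes
}
// inside a kernel: pitched_row(d_array, pitch, idx)[idy] = change_to;
*/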
__global__ void modify_i_j(
int width, int height, int pitch,
float *d_array,
int i, int j, float change_to
){
//we want to change the [i,j]-th of the 2-dim array
int idx = blockIdx.x; //row
int idy = threadIdx.x; //column
//we can do index by pointer:
//if ((idx == i) && (idy == j)){
//float* row = (float *)((char*)d_array + idx*pitch);
// row[idy] = change_to;
//}
//or, a more convenient way is to do index just use idx and idy
if ((idx == i) && (idy == j))
{
d_array[idx*(pitch / sizeof(float)) + idy] = change_to;
}
}
void efdForward_1d_BM_cuda(float S, float tau){
//construct a proper grid for S
float ST_max = S + 4 * sqrt(tau);
int N = 2 * EFDN + 1;
float s = (ST_max - S) / (EFDN + 0.0);
float *h_P0;
h_P0 = new float[N];
//initialize the initial density
for (int i = 0; i < N; i++){
if (i == EFDN){
h_P0[i] = 1.0;//a point mass at S
}
else{
h_P0[i] = 0.0;
}
}
//time step
int n = 100;
float t = tau / n;
//coefficients from the PDE:
float pu = t / 2.0 / s / s;
float pd = t / 2.0 / s / s;
float pm = 1.0 - t / s / s;
//pass the grid to device:
float *d_P_0, *d_P_1; // Device Pointers
cudaMalloc((void**)&d_P_0, N * sizeof(float));
cudaMalloc((void**)&d_P_1, N * sizeof(float));
cudaMemcpy(d_P_0, h_P0, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_P_1, h_P0, N*sizeof(float), cudaMemcpyHostToDevice);
// forward:
for (int i = 0; i < n; i++)
{
if (i % 2 == 0)
{
EFD_1dBM << < 2, 40 >> >(
N,
d_P_0,
d_P_1,
pu, pm, pd
);
}
else
{
EFD_1dBM << <2, 40 >> >(
N,
d_P_1,
d_P_0, pu, pm, pd);
}
}
if ((n - 1) % 2 == 0){
cudaMemcpy(h_P0, d_P_1, N * sizeof(float), cudaMemcpyDeviceToHost);
}
else
{
cudaMemcpy(h_P0, d_P_0, N * sizeof(float), cudaMemcpyDeviceToHost);
}
std::cout << "the terminal density is:" << std::endl;
for (int i = 0; i < N; i++){
std::cout << S - EFDN*s + i*s << ": " << h_P0[i] << std::endl;
}
}
__global__ void EFD_1dBM(
int size,
float *d_val_n,
float *d_val_npo,
float Pu, float Pm, float Pd
){
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < size)
{
d_val_npo[i] = Pu * d_val_n[i + 1] + Pm * d_val_n[i] + Pd * d_val_n[i - 1];
if (i == 0)
{
d_val_npo[i] = d_val_npo[1];
}
else if (i == size - 1)
{
d_val_npo[i] = d_val_npo[i - 1];
}
}
}
__global__ void EFD(
int size,
float *d_val_n,
float *d_val_npo,
float Pu, float Pm, float Pd,
float x0, float x
)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < size)
{
d_val_npo[i] = Pu * d_val_n[i + 1] + Pm * d_val_n[i] + Pd * d_val_n[i - 1];
if (i == 0)
{
d_val_npo[i] = d_val_npo[1];
}
else if (i == size - 1)
{
d_val_npo[i] = d_val_npo[i - 1]
+ exp(x0 + x * (float(i / 2)))
- exp(x0 + x * (float(i / 2 - 1)));
}
}
}
void efdEuroCallPrice_cuda(float S0, float sigma, float tau, float K, float r){
//calculate all parameters in CPU
float Tolerance = .001;
float t = Tolerance / (1 + 3 * sigma*sigma);
int n = tau / t;//n time intervals horizontally
float x = sigma*sqrt(3 * t);
int myN = 4 * sigma*sqrt(tau) / x;// 2N+1 possible values vertically
std::cout << "myN=" << myN << std::endl;
std::cout << "n=" << n << std::endl;
float nu = r - .5*sigma*sigma;
float disc = exp(-r*t);//discount factor
float Pu = (sigma*sigma*t) / (2 * x*x) + (nu*t) / (2 * x);
float Pd = (sigma*sigma*t) / (2 * x*x) - (nu*t) / (2 * x);
float Pm = 1 - Pu - Pd;
Pu = Pu*disc;
Pm = Pm*disc;
Pd = Pd*disc;
float x0 = log(S0);
int SIZEOFARR = 2 * EFDN + 1;
// START NEW CODE
float *h_Price; // Host Pointer
float *d_Price_0, *d_Price_1; // Device Pointers
/* Generate Terminal Conditions in h_Price */
h_Price = new float[SIZEOFARR];
for (int i = 0; i < SIZEOFARR; i++){
float myx = x0 + x* (i + 1 - EFDN - 1);
float myS = exp(myx);
h_Price[i] = imax(0.0, myS - K);
//std::cout << "h[" << i << "]=" << h_Price[i] << std::endl;
}
/* allocate device buffers and load the terminal condition */
cudaMalloc((void**)&d_Price_0, SIZEOFARR * sizeof(float));
cudaMalloc((void**)&d_Price_1, SIZEOFARR * sizeof(float));
cudaMemcpy(d_Price_0, h_Price, SIZEOFARR*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_Price_1, h_Price, SIZEOFARR*sizeof(float), cudaMemcpyHostToDevice);
for (int i = n - 1; i >= 0; i--)
{
if (i % 2 == 0)
{
EFD << < 2, 40 >> >(
SIZEOFARR,
d_Price_0,
d_Price_1,
Pu, Pm, Pd, x0, x
);
}
else
{
EFD << <2, 40 >> >(
SIZEOFARR,
d_Price_1,
d_Price_0, Pu, Pm, Pd, x0, x);
}
}
cudaMemcpy(h_Price, d_Price_1, SIZEOFARR * sizeof(float), cudaMemcpyDeviceToHost);
std::cout << "the efd price from device is " << h_Price[EFDN] << std::endl;
	cudaFree(d_Price_0);
	cudaFree(d_Price_1);
	delete[] h_Price;
}
void ifdEuroCallPrice_cuda(double S0, double sigma, double tau, double K, double r){
double Tolerance = .001;
double x = sqrt(Tolerance / 2.0);
double t = Tolerance / 2.0;
int n = tau / t;//n time intervals horizontally
	int N = 4 * sigma*sqrt(tau) / x;//note: the grid below is sized by the fixed EFDN, not this N
//int N = 100;
//std::cout << "in ifd, N=" << N << std::endl;
// 2N+1 possible values vertically
// cout<< "N="<<N<<endl<<"n="<<n<<endl;
	double nu = r - .5*sigma*sigma;
double alpha = -.5*t*(sigma*sigma / x / x + nu / x);
double beta = 1.0 + t*sigma*sigma / x / x + r*t;
double gamma = -.5*t*(sigma*sigma / x / x - nu / x);
/*
std::cout<<"alpha="<<alpha<<std::endl
<<"beta="<<beta<<std::endl
<<"gamma="<<gamma<<std::endl
<<alpha+beta<<std::endl;
*/
	double x0 = log(S0);
const int SIZEOFARR = 2 * EFDN + 1;
// set up the 3 vectors of the tridiagonal matrix
double *h_dl = (double*)malloc(SIZEOFARR*sizeof(double));
double *h_d = (double*)malloc(SIZEOFARR*sizeof(double));
double *h_du = (double*)malloc(SIZEOFARR*sizeof(double));
for (int i = 0; i < SIZEOFARR; i++){
if (i == 0){
//first row
h_dl[i] = 0.0;
h_d[i] = 1.0;
h_du[i] = -1.0;
}
else if (i == (SIZEOFARR - 1)){
//last row
h_dl[i] = 1.0;
h_d[i] = -1.0;
h_du[i] = 0.0;
}
else{
//other rows
h_dl[i] = alpha;
h_d[i] = beta;
h_du[i] = gamma;
}
}
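	// Each implicit step solves the tridiagonal system
	//   alpha*V[i-1] + beta*V[i] + gamma*V[i+1] = V_old[i]   (interior rows),
	// with derivative boundary rows: V[0] - V[1] = lambda_U at the top of the
	// grid (i = 0 is the highest price here) and V[M-2] - V[M-1] = lambda_L = 0
	// at the bottom.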
double *d_dl; cudaMalloc(&d_dl, SIZEOFARR*sizeof(double));
double *d_d; cudaMalloc(&d_d, SIZEOFARR*sizeof(double));
double *d_du; cudaMalloc(&d_du, SIZEOFARR*sizeof(double));
cudaMemcpy(d_dl, h_dl, SIZEOFARR*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_d, h_d, SIZEOFARR*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_du, h_du, SIZEOFARR*sizeof(double), cudaMemcpyHostToDevice);
/* Generate Terminal Conditions in h_Price */
double *h_Price = (double *)malloc(SIZEOFARR*sizeof(double));
//h_Price = new double[SIZEOFARR];
for (int i = 0; i < SIZEOFARR; i++){
double myx = x0 + x* ( EFDN - i );
double myS = exp(myx);
h_Price[i] = imax(0.0, myS - K);
}
double * d_Price; cudaMalloc(&d_Price, SIZEOFARR*sizeof(double));
cudaMemcpy(d_Price, h_Price, SIZEOFARR*sizeof(double), cudaMemcpyHostToDevice);
double lambda_U = exp(x0 + x*EFDN) - exp(x0 + x*(EFDN - 1));
double lambda_L = 0.0;
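	// lambda_U = S_top - S_(top-1): deep in the money a call has delta ~ 1, so
	// the price difference across the upper boundary equals the stock-price step;
	// deep out of the money delta ~ 0, hence lambda_L = 0.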
//initialize cuSPARSE
cusparseHandle_t handle; cusparseCreate(&handle);
// sequential backward to the initial step, each step solves a tridiagonal system using cusparseSgtsv_nopivot
for (int i = n-1 ; i >= 0; i--){
		//cusparseDgtsv(cusparseHandle_t handle, int m, int n, const double *dl, const double *d, const double *du, double *B, int ldb)
// a good example:
// https://github.com/OrangeOwlSolutions/Linear-Algebra/blob/master/SolveTridiagonalLinearSystem.cu
IFD_boundary << <2, 40 >> >(SIZEOFARR, d_Price, lambda_U, lambda_L);
cusparseDgtsv(handle, SIZEOFARR, 1, d_dl, d_d, d_du, d_Price, SIZEOFARR);
}
// get the middle one as the reslting price
cudaMemcpy(h_Price, d_Price, SIZEOFARR * sizeof(double), cudaMemcpyDeviceToHost);
std::cout << "the ifd price from device is " << h_Price[EFDN] << std::endl;
/*for (int i = 0; i < SIZEOFARR; i++){
std::cout << "h[" << i << "]=" << h_Price[i] << std::endl;
}*/
	free(h_Price); // allocated with malloc, so free() rather than delete[]
	free(h_dl); free(h_d); free(h_du);
	cudaFree(d_Price);
	cudaFree(d_dl); cudaFree(d_d); cudaFree(d_du);
	cusparseDestroy(handle);
}
__global__ void IFD_boundary(
int size,
double *d_Price,
double lambda_U,
double lambda_L
)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < size)
{
if (i == 0)//top condition
{
d_Price[i] = lambda_U;
}
else if (i == size - 1) //bottom condition
{
d_Price[i] = 0.0;
}
}
}
|
8f63806f0287bff46a641dde1a408dc794edf5ec.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
Pinned memory is used as a staging area for transfers from the device to the host. We can avoid
the cost of the transfer between pageable and pinned host arrays by directly allocating our host
arrays in pinned memory. Allocate pinned host memory in HIP using hipHostMalloc() (the
counterpart of cudaMallocHost()/cudaHostAlloc()), and deallocate it with hipHostFree(). It is possible for pinned memory allocation
to fail, so you should always check for errors. The following code excerpt demonstrates allocation
of pinned memory with error checking.
*/
#include <iostream>
#include <assert.h>
#include <string>
#include <cstdint> // for uint32_t
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
inline hipError_t checkCuda(hipError_t result){
#if defined(DEBUG) || defined(_DEBUG)
if(result != hipSuccess){
std::cerr << "Cuda Runtime Error: " << hipGetErrorString(result) << std::endl;
assert(result == hipSuccess);
}
#endif
return result;
}
void profileCopies(float *h_a, float *h_b, float *d, unsigned int n, std::string desc){
std::cout << '\n' << desc <<" transfers\n";
unsigned int bytes = n * sizeof(float);
// Events timing
hipEvent_t startEvent, stopEvent;
checkCuda(hipEventCreate(&startEvent));
checkCuda(hipEventCreate(&stopEvent));
// Measuring transfer Host to Device
checkCuda(hipEventRecord(startEvent, 0));
checkCuda(hipMemcpy(d, h_a, bytes, hipMemcpyHostToDevice));
checkCuda(hipEventRecord(stopEvent, 0));
checkCuda(hipEventSynchronize(stopEvent));
float time;
checkCuda(hipEventElapsedTime(&time, startEvent, stopEvent));
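	// hipEventElapsedTime reports milliseconds, so (bytes * 1e-6) / time = MB/ms = GB/s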
std::cout << " Host to Device Bandwidth (GB/s): " << (bytes * 1e-6) / time << std::endl;
// Measuring transfer Device to Host
checkCuda(hipEventRecord(startEvent, 0));
checkCuda(hipMemcpy(h_b, d, bytes, hipMemcpyDeviceToHost));
checkCuda(hipEventRecord(stopEvent, 0));
checkCuda(hipEventSynchronize(stopEvent));
checkCuda(hipEventElapsedTime(&time, startEvent, stopEvent));
std::cout << " Device to Host Bandwidth (GB/s): " << (bytes * 1e-6) / time << std::endl;
// Check result
for(size_t i = 0; i != n; ++i){
if(h_a[i] != h_b[i]){
std::cout << " Transfers failed " << desc << std::endl;
break;
}
}
// Cleaning up events
checkCuda(hipEventDestroy(startEvent));
checkCuda(hipEventDestroy(stopEvent));
}
int main(int argc, char* argv[]){
uint32_t n = 4 * 1024 * 1024;
const uint32_t bytes = n * sizeof(float);
// Host arrays
float *h_aPageable, *h_bPageable;
float *h_aPinned, *h_bPinned;
// Device array
float *d_a;
// Allocate and initialize
h_aPageable = (float *)malloc(n * sizeof(float));
h_bPageable = (float *)malloc(n * sizeof(float));
checkCuda(hipHostMalloc((void**)&h_aPinned, bytes));
checkCuda(hipHostMalloc((void**)&h_bPinned, bytes));
checkCuda(hipMalloc((void**)&d_a, bytes));
	// Output device info and transfer size
hipDeviceProp_t device;
checkCuda(hipGetDeviceProperties(&device, 0));
std::cout << "\n Device : " << device.name << std::endl;
std::cout << " Transfer size (MB): " << bytes / (1024 * 1024) << std::endl;
// Perform copies and report results
profileCopies(h_aPageable, h_bPageable, d_a, n, "Pageable");
profileCopies(h_aPinned, h_bPinned, d_a, n, "Pinned");
// Cleanup
hipFree(d_a);
hipHostFree(h_aPinned);
hipHostFree(h_bPinned);
free(h_aPageable);
free(h_bPageable);
return 0;
}
|
8f63806f0287bff46a641dde1a408dc794edf5ec.cu
|
/*
Pinned memory is used as a staging area for transfers from the device to the host. We can avoid
the cost of the transfer between pageable and pinned host arrays by directly allocating our host
arrays in pinned memory. Allocate pinned host memory in CUDA C/C++ using cudaMallocHost() or
cudaHostAlloc(), and deallocate it with cudaFreeHost(). It is possible for pinned memory allocation
to fail, so you should always check for errors. The following code excerpt demonstrates allocation
of pinned memory with error checking.
*/
#include <iostream>
#include <assert.h>
#include <string>
#include <cstdint> // for uint32_t
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
inline cudaError_t checkCuda(cudaError_t result){
#if defined(DEBUG) || defined(_DEBUG)
if(result != cudaSuccess){
std::cerr << "Cuda Runtime Error: " << cudaGetErrorString(result) << std::endl;
assert(result == cudaSuccess);
}
#endif
return result;
}
void profileCopies(float *h_a, float *h_b, float *d, unsigned int n, std::string desc){
std::cout << '\n' << desc <<" transfers\n";
unsigned int bytes = n * sizeof(float);
// Events timing
cudaEvent_t startEvent, stopEvent;
checkCuda(cudaEventCreate(&startEvent));
checkCuda(cudaEventCreate(&stopEvent));
// Measuring transfer Host to Device
checkCuda(cudaEventRecord(startEvent, 0));
checkCuda(cudaMemcpy(d, h_a, bytes, cudaMemcpyHostToDevice));
checkCuda(cudaEventRecord(stopEvent, 0));
checkCuda(cudaEventSynchronize(stopEvent));
float time;
checkCuda(cudaEventElapsedTime(&time, startEvent, stopEvent));
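	// cudaEventElapsedTime reports milliseconds, so (bytes * 1e-6) / time = MB/ms = GB/s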
std::cout << " Host to Device Bandwidth (GB/s): " << (bytes * 1e-6) / time << std::endl;
// Measuring transfer Device to Host
checkCuda(cudaEventRecord(startEvent, 0));
checkCuda(cudaMemcpy(h_b, d, bytes, cudaMemcpyDeviceToHost));
checkCuda(cudaEventRecord(stopEvent, 0));
checkCuda(cudaEventSynchronize(stopEvent));
checkCuda(cudaEventElapsedTime(&time, startEvent, stopEvent));
std::cout << " Device to Host Bandwidth (GB/s): " << (bytes * 1e-6) / time << std::endl;
// Check result
for(size_t i = 0; i != n; ++i){
if(h_a[i] != h_b[i]){
std::cout << " Transfers failed " << desc << std::endl;
break;
}
}
// Cleaning up events
checkCuda(cudaEventDestroy(startEvent));
checkCuda(cudaEventDestroy(stopEvent));
}
int main(int argc, char* argv[]){
uint32_t n = 4 * 1024 * 1024;
const uint32_t bytes = n * sizeof(float);
// Host arrays
float *h_aPageable, *h_bPageable;
float *h_aPinned, *h_bPinned;
// Device array
float *d_a;
// Allocate and initialize
h_aPageable = (float *)malloc(n * sizeof(float));
h_bPageable = (float *)malloc(n * sizeof(float));
checkCuda(cudaMallocHost((void**)&h_aPinned, bytes));
checkCuda(cudaMallocHost((void**)&h_bPinned, bytes));
checkCuda(cudaMalloc((void**)&d_a, bytes));
	// Output device info and transfer size
cudaDeviceProp device;
checkCuda(cudaGetDeviceProperties(&device, 0));
std::cout << "\n Device : " << device.name << std::endl;
std::cout << " Transfer size (MB): " << bytes / (1024 * 1024) << std::endl;
// Perform copies and report results
profileCopies(h_aPageable, h_bPageable, d_a, n, "Pageable");
profileCopies(h_aPinned, h_bPinned, d_a, n, "Pinned");
// Cleanup
cudaFree(d_a);
cudaFreeHost(h_aPinned);
cudaFreeHost(h_bPinned);
free(h_aPageable);
free(h_bPageable);
return 0;
}
|
3536d2f2743121ac6eef1eb79ef1db5b6c4c4169.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <stdio.h>
#include "star2d1r-256-10-128_kernel.hu"
#define BENCH_DIM 2
#define BENCH_FPP 9
#define BENCH_RAD 1
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 3 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
hipError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != hipSuccess) { \
fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == hipSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(hipGetLastError()); \
} while(0)
float *dev_A;
cudaCheckReturn(hipMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float)));
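/* dev_A holds both time planes of A (hence the factor of 2); the generated
   kernels presumably alternate between them, as the CPU path below does. */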
{
cudaCheckReturn(hipMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), hipMemcpyHostToDevice));
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
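/* AN5D temporal tiling (as we read the generated code): each kernel0_k call
   advances k time steps per launch; the __side*Len values are tile extents and
   __OlLen* the halo overlap consumed per step, so deeper time tiles need wider
   overlapped regions. */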
{
const AN5D_TYPE __side0Len = 10;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 236;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
hipLaunchKernelGGL(( kernel0_10), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 246;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 246;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 1)
{
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 246;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 2)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 254;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 254;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 3)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 254;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 4)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 5)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 6)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 7)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 8)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 9)
{
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 246;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
}
else if (__c0Len % __side0LenMax)
{
if (__c0Len % __side0LenMax == 1)
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 254;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 2)
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 3)
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 4)
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 5)
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 246;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 6)
{
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 244;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_6), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 7)
{
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 242;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_7), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 8)
{
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 240;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_8), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 9)
{
const AN5D_TYPE __side0Len = 9;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 238;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_9), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(hipMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), hipMemcpyDeviceToHost));
}
cudaCheckReturn(hipFree(dev_A));
}
}
else {
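    /* CPU reference path: Jacobi-style 5-point star stencil alternating between
       the two time planes A[t%2] and A[(t+1)%2]; the coefficients sum to 1. */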
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
A[(t+1)%2][i][j] =
0.1873f * A[t%2][i-1][j]
+ 0.1876f * A[t%2][i][j-1]
+ 0.2500f * A[t%2][i][j]
+ 0.1877f * A[t%2][i][j+1]
+ 0.1874f * A[t%2][i+1][j];
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
|
3536d2f2743121ac6eef1eb79ef1db5b6c4c4169.cu
|
#include <assert.h>
#include <stdio.h>
#include "star2d1r-256-10-128_kernel.hu"
#define BENCH_DIM 2
#define BENCH_FPP 9
#define BENCH_RAD 1
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 3 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
cudaError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != cudaSuccess) { \
fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == cudaSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(cudaGetLastError()); \
} while(0)
float *dev_A;
cudaCheckReturn(cudaMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float)));
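/* dev_A holds both time planes of A (hence the factor of 2); the generated
   kernels presumably alternate between them, as the CPU path below does. */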
{
cudaCheckReturn(cudaMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), cudaMemcpyHostToDevice));
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
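/* AN5D temporal tiling (as we read the generated code): each kernel0_k call
   advances k time steps per launch; the __side*Len values are tile extents and
   __OlLen* the halo overlap consumed per step, so deeper time tiles need wider
   overlapped regions. */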
{
const AN5D_TYPE __side0Len = 10;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 236;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
kernel0_10<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 246;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 246;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 1)
{
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 246;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 2)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 254;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 254;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 3)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 254;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 4)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 5)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 6)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 7)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 8)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 9)
{
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 246;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
}
else if (__c0Len % __side0LenMax)
{
if (__c0Len % __side0LenMax == 1)
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 254;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 2)
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 3)
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 4)
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 5)
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 246;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 6)
{
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 244;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_6<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 7)
{
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 242;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_7<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 8)
{
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 240;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_8<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 9)
{
const AN5D_TYPE __side0Len = 9;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 238;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_9<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
}
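  /* Added note on the dispatch above: each branch fixes a temporal tile depth
     __side0Len (kernel0_N advances N time steps per launch) and shrinks the
     spatial tile __side2Len by 2*__halo2 per extra step, so the padded block
     size __side2LenOl = __side2Len + 2*__halo2*__side0Len stays constant
     (256 here, assuming __halo2 == 1). Remainder time-step counts too deep for
     a single tile are split across two launches (e.g. 8 -> 4+4, 9 -> 5+4). */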
cudaCheckKernel();
{
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(cudaMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), cudaMemcpyDeviceToHost));
}
cudaCheckReturn(cudaFree(dev_A));
}
}
else {
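    // Host fallback (added comment): double-buffered 5-point stencil sweep over
    // the interior, parallelized across rows with OpenMP; A[(t+1)%2] is written
    // from A[t%2] each time step.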
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
A[(t+1)%2][i][j] =
0.1873f * A[t%2][i-1][j]
+ 0.1876f * A[t%2][i][j-1]
+ 0.2500f * A[t%2][i][j]
+ 0.1877f * A[t%2][i][j+1]
+ 0.1874f * A[t%2][i+1][j];
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
|
6b0149e77868e309899b92ddb8faeec269a88628.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
Fractal code for CS 4380 / CS 5351
Copyright (c) 2016, Texas State University. All rights reserved.
Redistribution in source or binary form, with or without modification,
is not permitted. Use in source and binary forms, with or without
modification, is only permitted for academic use in CS 4380 or CS 5351
at Texas State University.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Author: Martin Burtscher & Luis Gomez(lg1336)
*/
#include <cstdlib>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include "cs43805351.h"
static const int ThreadsPerBlock = 512;
static const double Delta = 0.005491;
static const double xMid = 0.745796;
static const double yMid = 0.105089;
static __global__
void FractalKernel(const int frames, const int width, unsigned char pic[])
{
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < frames * (width * width)) {
const int col = idx % width;
const int row = (idx / width) % width;
const int frame = idx / (width * width);
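    // Worked example (added): width = 4, idx = 37 gives col = 37 % 4 = 1,
    // row = (37 / 4) % 4 = 1, frame = 37 / 16 = 2, which inverts
    // frame*width*width + row*width + col = 32 + 4 + 1 = 37.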
const double delta = Delta * pow(0.99, frame + 1);
    // compute a single pixel via the Mandelbrot escape-time iteration below
const double xMin = xMid - delta;
const double yMin = yMid - delta;
const double dw = 2.0 * delta / width;
const double cy = -yMin - row * dw;
const double cx = -xMin - col * dw;
double x = cx;
double y = cy;
int depth = 256;
double x2, y2;
do {
x2 = x * x;
y2 = y * y;
y = 2 * x * y + cy;
x = x2 - y2 + cx;
depth--;
} while ((depth > 0) && ((x2 + y2) < 5.0));
pic[frame * width * width + row * width + col] = (unsigned char)depth;
}
}
int main(int argc, char *argv[])
{
printf("Fractal v1.5 [CUDA]\n");
// check command line
if (argc != 3){
fprintf(stderr, "usage: %s frame_width num_frames\n", argv[0]);
exit(-1);
}
int width = atoi(argv[1]);
if (width < 10){
fprintf(stderr, "error: frame_width must be at least 10\n");
exit(-1);
}
int frames = atoi(argv[2]);
if (frames < 1){
fprintf(stderr, "error: num_frames must be at least 1\n");
exit(-1);
}
printf("computing %d frames of %d by %d fractal\n", frames, width, width);
// allocate picture array
unsigned char* pic = new unsigned char[frames * width * width];
unsigned char* pic_d;
if (hipSuccess != hipMalloc((void **)&pic_d, frames * width * width * sizeof(unsigned char))){
fprintf(stderr, "could not allocate memory\n");
exit(-1);
}
// start time
struct timeval start, end;
gettimeofday(&start, NULL);
//computing # of blocks needed
int numb_blocks = ((width * width * frames) + ThreadsPerBlock - 1) / ThreadsPerBlock;
  // launch FractalKernel with one thread per pixel per frame
hipLaunchKernelGGL(( FractalKernel), dim3(numb_blocks) , dim3(ThreadsPerBlock), 0, 0, frames, width, pic_d);
if (hipSuccess != hipMemcpy(pic, pic_d, frames * width * width *
sizeof(unsigned char), hipMemcpyDeviceToHost)){
fprintf(stderr, "copying from device failed\n");
exit(-1);
}
// end time
gettimeofday(&end, NULL);
double runtime = end.tv_sec + end.tv_usec / 1000000.0 - start.tv_sec - start.tv_usec / 1000000.0;
printf("compute time: %.4f s\n", runtime);
// verify result by writing frames to BMP files
if((width <= 400) && (frames <= 30)){
for(int frame = 0; frame < frames; frame++) {
char name[32];
sprintf(name, "fractal%d.bmp", frame + 1000);
writeBMP(width, width, &pic[frame * width * width], name);
}
}
delete [] pic;
hipFree(pic_d);
return 0;
}
|
6b0149e77868e309899b92ddb8faeec269a88628.cu
|
/*
Fractal code for CS 4380 / CS 5351
Copyright (c) 2016, Texas State University. All rights reserved.
Redistribution in source or binary form, with or without modification,
is not permitted. Use in source and binary forms, with or without
modification, is only permitted for academic use in CS 4380 or CS 5351
at Texas State University.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Author: Martin Burtscher & Luis Gomez(lg1336)
*/
#include <cstdlib>
#include <sys/time.h>
#include <cuda.h>
#include "cs43805351.h"
static const int ThreadsPerBlock = 512;
static const double Delta = 0.005491;
static const double xMid = 0.745796;
static const double yMid = 0.105089;
static __global__
void FractalKernel(const int frames, const int width, unsigned char pic[])
{
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < frames * (width * width)) {
const int col = idx % width;
const int row = (idx / width) % width;
const int frame = idx / (width * width);
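    // Worked example (added): width = 4, idx = 37 gives col = 37 % 4 = 1,
    // row = (37 / 4) % 4 = 1, frame = 37 / 16 = 2, which inverts
    // frame*width*width + row*width + col = 32 + 4 + 1 = 37.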
const double delta = Delta * pow(0.99, frame + 1);
    // compute a single pixel via the Mandelbrot escape-time iteration below
const double xMin = xMid - delta;
const double yMin = yMid - delta;
const double dw = 2.0 * delta / width;
const double cy = -yMin - row * dw;
const double cx = -xMin - col * dw;
double x = cx;
double y = cy;
int depth = 256;
double x2, y2;
do {
x2 = x * x;
y2 = y * y;
y = 2 * x * y + cy;
x = x2 - y2 + cx;
depth--;
} while ((depth > 0) && ((x2 + y2) < 5.0));
pic[frame * width * width + row * width + col] = (unsigned char)depth;
}
}
int main(int argc, char *argv[])
{
printf("Fractal v1.5 [CUDA]\n");
// check command line
if (argc != 3){
fprintf(stderr, "usage: %s frame_width num_frames\n", argv[0]);
exit(-1);
}
int width = atoi(argv[1]);
if (width < 10){
fprintf(stderr, "error: frame_width must be at least 10\n");
exit(-1);
}
int frames = atoi(argv[2]);
if (frames < 1){
fprintf(stderr, "error: num_frames must be at least 1\n");
exit(-1);
}
printf("computing %d frames of %d by %d fractal\n", frames, width, width);
// allocate picture array
unsigned char* pic = new unsigned char[frames * width * width];
unsigned char* pic_d;
if (cudaSuccess != cudaMalloc((void **)&pic_d, frames * width * width * sizeof(unsigned char))){
fprintf(stderr, "could not allocate memory\n");
exit(-1);
}
// start time
struct timeval start, end;
gettimeofday(&start, NULL);
//computing # of blocks needed
int numb_blocks = ((width * width * frames) + ThreadsPerBlock - 1) / ThreadsPerBlock;
  // launch FractalKernel with one thread per pixel per frame
FractalKernel<<<numb_blocks , ThreadsPerBlock>>>(frames, width, pic_d);
if (cudaSuccess != cudaMemcpy(pic, pic_d, frames * width * width *
sizeof(unsigned char), cudaMemcpyDeviceToHost)){
fprintf(stderr, "copying from device failed\n");
exit(-1);
}
// end time
gettimeofday(&end, NULL);
double runtime = end.tv_sec + end.tv_usec / 1000000.0 - start.tv_sec - start.tv_usec / 1000000.0;
printf("compute time: %.4f s\n", runtime);
// verify result by writing frames to BMP files
if((width <= 400) && (frames <= 30)){
for(int frame = 0; frame < frames; frame++) {
char name[32];
sprintf(name, "fractal%d.bmp", frame + 1000);
writeBMP(width, width, &pic[frame * width * width], name);
}
}
delete [] pic;
cudaFree(pic_d);
return 0;
}
|
16cac2026b816ab8dada6524fb1ea3c97ebef5b4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#define BLOCK_SIZE 512
__global__ void reduction(float *out, float *in, unsigned size)
{
/********************************************************************
Load a segment of the input vector into shared memory
Traverse the reduction tree
Write the computed sum to the output vector at the correct index
********************************************************************/
__shared__ float outArr[2*BLOCK_SIZE];
int tx = threadIdx.x;
int i = (2* blockIdx.x * blockDim.x) + tx;
outArr[tx] = 0.0;
if(i < size){
outArr[tx] = in[i];
}
outArr[BLOCK_SIZE + tx] = 0.0;
if(i + BLOCK_SIZE < size)
outArr[BLOCK_SIZE + tx] = in[i + BLOCK_SIZE];
__syncthreads();
for (int offset = BLOCK_SIZE; offset > 0; offset >>= 1) {
if (tx < offset)
outArr[tx] += outArr[tx + offset];
__syncthreads();
}
if(tx == 0)
out[blockIdx.x] = outArr[0];
__syncthreads();
}
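// --- Host-side driver sketch (added illustration, not part of the original
// assignment): each launch folds 2*BLOCK_SIZE inputs per block into one
// partial sum, so the host loops until a single value remains. The buffer
// names (d_in, d_scratch) are hypothetical.
static float reduce_sum(float *d_in, float *d_scratch, unsigned size)
{
    while (size > 1) {
        unsigned blocks = (size + 2 * BLOCK_SIZE - 1) / (2 * BLOCK_SIZE);
        hipLaunchKernelGGL(reduction, dim3(blocks), dim3(BLOCK_SIZE), 0, 0,
                           d_scratch, d_in, size);
        float *tmp = d_in; d_in = d_scratch; d_scratch = tmp; // partials become next input
        size = blocks;
    }
    float sum;
    hipMemcpy(&sum, d_in, sizeof(float), hipMemcpyDeviceToHost);
    return sum;
}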
|
16cac2026b816ab8dada6524fb1ea3c97ebef5b4.cu
|
/******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#define BLOCK_SIZE 512
__global__ void reduction(float *out, float *in, unsigned size)
{
/********************************************************************
Load a segment of the input vector into shared memory
Traverse the reduction tree
Write the computed sum to the output vector at the correct index
********************************************************************/
__shared__ float outArr[2*BLOCK_SIZE];
int tx = threadIdx.x;
int i = (2* blockIdx.x * blockDim.x) + tx;
outArr[tx] = 0.0;
if(i < size){
outArr[tx] = in[i];
}
outArr[BLOCK_SIZE + tx] = 0.0;
if(i + BLOCK_SIZE < size)
outArr[BLOCK_SIZE + tx] = in[i + BLOCK_SIZE];
__syncthreads();
for (int offset = BLOCK_SIZE; offset > 0; offset >>= 1) {
if (tx < offset)
outArr[tx] += outArr[tx + offset];
__syncthreads();
}
if(tx == 0)
out[blockIdx.x] = outArr[0];
__syncthreads();
}
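// --- Host-side driver sketch (added illustration, not part of the original
// assignment): each launch folds 2*BLOCK_SIZE inputs per block into one
// partial sum, so the host loops until a single value remains. The buffer
// names (d_in, d_scratch) are hypothetical.
static float reduce_sum(float *d_in, float *d_scratch, unsigned size)
{
    while (size > 1) {
        unsigned blocks = (size + 2 * BLOCK_SIZE - 1) / (2 * BLOCK_SIZE);
        reduction<<<blocks, BLOCK_SIZE>>>(d_scratch, d_in, size);
        float *tmp = d_in; d_in = d_scratch; d_scratch = tmp; // partials become next input
        size = blocks;
    }
    float sum;
    cudaMemcpy(&sum, d_in, sizeof(float), cudaMemcpyDeviceToHost);
    return sum;
}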
|
a0863ce39b2323ab802f031e12e490cf43290490.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
Author: Cao Thanh Tung, Ashwin Nanjappa
Date: 05-Aug-2014
===============================================================================
Copyright (c) 2011, School of Computing, National University of Singapore.
All rights reserved.
Project homepage: http://www.comp.nus.edu.sg/~tants/gdel3d.html
If you use gDel3D and you like it or have comments on its usefulness etc., we
would love to hear from you at <[email protected]>. You may share with us
your experience and any possibilities that we may improve the work/code.
===============================================================================
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list of
conditions and the following disclaimer. Redistributions in binary form must reproduce
the above copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the distribution.
Neither the name of the National University of Singapore nor the names of its contributors
may be used to endorse or promote products derived from this software without specific
prior written permission from the National University of Singapore.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
*/
#include "ThrustWrapper.h"
#include <map>
#include <thrust/system/hip/execution_policy.h>
class CachedAllocator
{
private:
const int BlockSize;
typedef std::multimap< std::ptrdiff_t, char * > FreeBlocks;
typedef std::map< char *, std::ptrdiff_t > AllocBlocks;
FreeBlocks freeBlocks;
AllocBlocks allocBlocks;
public:
// just allocate bytes
typedef char value_type;
CachedAllocator()
: BlockSize( 4096 ) {}
void freeAll()
{
size_t totalSize = 0;
// deallocate all outstanding blocks in both lists
for( FreeBlocks::iterator i = freeBlocks.begin();
i != freeBlocks.end();
++i )
{
hipFree( i->second );
totalSize += i->first;
}
for( AllocBlocks::iterator i = allocBlocks.begin();
i != allocBlocks.end();
++i )
{
hipFree( i->first );
totalSize += i->second;
}
freeBlocks.clear();
allocBlocks.clear();
//std::cout << "*** CacheAllocator size: "
// << freeBlocks.size() + allocBlocks.size()
// << " Size in bytes: " << totalSize << std::endl;
}
char *allocate( std::ptrdiff_t numBytes )
{
char *result = 0;
numBytes = ( ( numBytes - 1 ) / BlockSize + 1 ) * BlockSize;
// search the cache for a free block
FreeBlocks::iterator freeBlock = freeBlocks.find( numBytes );
if( freeBlock != freeBlocks.end() )
{
//std::cout << "CachedAllocator: found a hit " << numBytes << std::endl;
result = freeBlock->second;
freeBlocks.erase( freeBlock );
}
else
{
            // no allocation of the right size exists;
            // create a new one with thrust::device_malloc,
            // which throws if the request cannot be satisfied
try
{
//std::cout << "CachedAllocator: no free block found; calling hipMalloc " << numBytes << std::endl;
// allocate memory and convert cuda::pointer to raw pointer
result = thrust::device_malloc<char>( numBytes ).get();
}
catch( std::runtime_error &e )
{
// output an error message and exit
std::cerr << "thrust::device_malloc failed to allocate " << numBytes << " bytes!" << std::endl;
exit( -1 );
}
}
// insert the allocated pointer into the allocated_blocks map
allocBlocks.insert( std::make_pair( result, numBytes ) );
return result;
}
void deallocate( char *ptr, size_t n )
{
// erase the allocated block from the allocated blocks map
AllocBlocks::iterator iter = allocBlocks.find( ptr );
std::ptrdiff_t numBytes = iter->second;
allocBlocks.erase(iter);
// insert the block into the free blocks map
freeBlocks.insert( std::make_pair( numBytes, ptr ) );
}
};
// the cache is simply a global variable
CachedAllocator thrustAllocator;
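// Usage note (added): passing the allocator through an execution policy, e.g.
//   thrust::sort( thrust::hip::par( thrustAllocator ), v.begin(), v.end() );
// makes thrust obtain its temporary storage via CachedAllocator::allocate /
// deallocate, so repeated algorithm calls reuse cached device blocks instead
// of allocating and freeing fresh memory every time.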
void thrust_free_all()
{
thrustAllocator.freeAll();
}
///////////////////////////////////////////////////////////////////////////////
void thrust_sort_by_key
(
DevVector<int>::iterator keyBeg,
DevVector<int>::iterator keyEnd,
thrust::zip_iterator<
thrust::tuple<
DevVector<int>::iterator,
DevVector<Point3>::iterator > > valueBeg
)
{
thrust::sort_by_key(
//thrust::hip::par( thrustAllocator ),
keyBeg, keyEnd, valueBeg );
}
void thrust_transform_GetMortonNumber
(
DevVector<Point3>::iterator inBeg,
DevVector<Point3>::iterator inEnd,
DevVector<int>::iterator outBeg,
RealType minVal,
RealType maxVal
)
{
thrust::transform(
thrust::hip::par( thrustAllocator ),
inBeg, inEnd, outBeg, GetMortonNumber( minVal, maxVal ) );
}
// Convert a count vector in place into its inclusive prefix-sum (its "map"),
// and also return the total sum of the input vector.
// Input: [ 4 2 0 5 ]
// Output: [ 4 6 6 11 ] Sum: 11
int makeInPlaceIncMapAndSum
(
IntDVec& inVec
)
{
thrust::inclusive_scan(
thrust::hip::par( thrustAllocator ),
inVec.begin(), inVec.end(), inVec.begin() );
const int sum = inVec[ inVec.size() - 1 ];
return sum;
}
int compactIfNegative
(
DevVector<int>& inVec
)
{
inVec.erase(
thrust::remove_if(
//thrust::hip::par( thrustAllocator ),
inVec.begin(),
inVec.end(), IsNegative() ),
inVec.end() );
return inVec.size();
}
int compactIfNegative
(
DevVector<int>& inVec,
DevVector<int>& temp
)
{
temp.resize( inVec.size() );
temp.erase(
thrust::copy_if(
thrust::hip::par( thrustAllocator ),
inVec.begin(),
inVec.end(),
temp.begin(),
IsNotNegative() ),
temp.end() );
inVec.swap( temp );
return (int) inVec.size();
}
void compactBothIfNegative
(
IntDVec& vec0,
IntDVec& vec1
)
{
assert( ( vec0.size() == vec1.size() ) && "Vectors should be equal size!" );
const IntZipDIter newEnd =
thrust::remove_if(
//thrust::hip::par( thrustAllocator ),
thrust::make_zip_iterator( thrust::make_tuple( vec0.begin(), vec1.begin() ) ),
thrust::make_zip_iterator( thrust::make_tuple( vec0.end(), vec1.end() ) ),
IsIntTuple2Negative() );
const IntDIterTuple2 endTuple = newEnd.get_iterator_tuple();
vec0.erase( thrust::get<0>( endTuple ), vec0.end() );
vec1.erase( thrust::get<1>( endTuple ), vec1.end() );
return;
}
int thrust_copyIf_IsActiveTetra
(
const CharDVec& inVec,
IntDVec& outVec
)
{
thrust::counting_iterator<int> first( 0 );
thrust::counting_iterator<int> last = first + inVec.size();
outVec.resize( inVec.size() );
outVec.erase(
thrust::copy_if(
thrust::hip::par( thrustAllocator ),
first, last,
inVec.begin(),
outVec.begin(),
IsTetActive() ),
outVec.end()
);
return outVec.size();
}
int thrust_copyIf_Insertable
(
const IntDVec& stencil,
IntDVec& outVec
)
{
thrust::counting_iterator<int> first( 0 );
thrust::counting_iterator<int> last = first + stencil.size();
outVec.resize( stencil.size() );
outVec.erase(
thrust::copy_if(
thrust::hip::par( thrustAllocator ),
first, last,
stencil.begin(),
outVec.begin(),
IsNegative() ),
outVec.end()
);
return outVec.size();
}
|
a0863ce39b2323ab802f031e12e490cf43290490.cu
|
/*
Author: Cao Thanh Tung, Ashwin Nanjappa
Date: 05-Aug-2014
===============================================================================
Copyright (c) 2011, School of Computing, National University of Singapore.
All rights reserved.
Project homepage: http://www.comp.nus.edu.sg/~tants/gdel3d.html
If you use gDel3D and you like it or have comments on its usefulness etc., we
would love to hear from you at <[email protected]>. You may share with us
your experience and any possibilities that we may improve the work/code.
===============================================================================
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list of
conditions and the following disclaimer. Redistributions in binary form must reproduce
the above copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the distribution.
Neither the name of the National University of Singapore nor the names of its contributors
may be used to endorse or promote products derived from this software without specific
prior written permission from the National University of Singapore.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
*/
#include "ThrustWrapper.h"
#include <map>
#include <thrust/system/cuda/execution_policy.h>
class CachedAllocator
{
private:
const int BlockSize;
typedef std::multimap< std::ptrdiff_t, char * > FreeBlocks;
typedef std::map< char *, std::ptrdiff_t > AllocBlocks;
FreeBlocks freeBlocks;
AllocBlocks allocBlocks;
public:
// just allocate bytes
typedef char value_type;
CachedAllocator()
: BlockSize( 4096 ) {}
void freeAll()
{
size_t totalSize = 0;
// deallocate all outstanding blocks in both lists
for( FreeBlocks::iterator i = freeBlocks.begin();
i != freeBlocks.end();
++i )
{
cudaFree( i->second );
totalSize += i->first;
}
for( AllocBlocks::iterator i = allocBlocks.begin();
i != allocBlocks.end();
++i )
{
cudaFree( i->first );
totalSize += i->second;
}
freeBlocks.clear();
allocBlocks.clear();
//std::cout << "*** CacheAllocator size: "
// << freeBlocks.size() + allocBlocks.size()
// << " Size in bytes: " << totalSize << std::endl;
}
char *allocate( std::ptrdiff_t numBytes )
{
char *result = 0;
numBytes = ( ( numBytes - 1 ) / BlockSize + 1 ) * BlockSize;
// search the cache for a free block
FreeBlocks::iterator freeBlock = freeBlocks.find( numBytes );
if( freeBlock != freeBlocks.end() )
{
//std::cout << "CachedAllocator: found a hit " << numBytes << std::endl;
result = freeBlock->second;
freeBlocks.erase( freeBlock );
}
else
{
            // no allocation of the right size exists;
            // create a new one with thrust::device_malloc,
            // which throws if the request cannot be satisfied
try
{
//std::cout << "CachedAllocator: no free block found; calling cudaMalloc " << numBytes << std::endl;
// allocate memory and convert cuda::pointer to raw pointer
result = thrust::device_malloc<char>( numBytes ).get();
}
catch( std::runtime_error &e )
{
// output an error message and exit
std::cerr << "thrust::device_malloc failed to allocate " << numBytes << " bytes!" << std::endl;
exit( -1 );
}
}
// insert the allocated pointer into the allocated_blocks map
allocBlocks.insert( std::make_pair( result, numBytes ) );
return result;
}
void deallocate( char *ptr, size_t n )
{
// erase the allocated block from the allocated blocks map
AllocBlocks::iterator iter = allocBlocks.find( ptr );
std::ptrdiff_t numBytes = iter->second;
allocBlocks.erase(iter);
// insert the block into the free blocks map
freeBlocks.insert( std::make_pair( numBytes, ptr ) );
}
};
// the cache is simply a global variable
CachedAllocator thrustAllocator;
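// Usage note (added): passing the allocator through an execution policy, e.g.
//   thrust::sort( thrust::cuda::par( thrustAllocator ), v.begin(), v.end() );
// makes thrust obtain its temporary storage via CachedAllocator::allocate /
// deallocate, so repeated algorithm calls reuse cached device blocks instead
// of allocating and freeing fresh memory every time.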
void thrust_free_all()
{
thrustAllocator.freeAll();
}
///////////////////////////////////////////////////////////////////////////////
void thrust_sort_by_key
(
DevVector<int>::iterator keyBeg,
DevVector<int>::iterator keyEnd,
thrust::zip_iterator<
thrust::tuple<
DevVector<int>::iterator,
DevVector<Point3>::iterator > > valueBeg
)
{
thrust::sort_by_key(
//thrust::cuda::par( thrustAllocator ),
keyBeg, keyEnd, valueBeg );
}
void thrust_transform_GetMortonNumber
(
DevVector<Point3>::iterator inBeg,
DevVector<Point3>::iterator inEnd,
DevVector<int>::iterator outBeg,
RealType minVal,
RealType maxVal
)
{
thrust::transform(
thrust::cuda::par( thrustAllocator ),
inBeg, inEnd, outBeg, GetMortonNumber( minVal, maxVal ) );
}
// Convert a count vector in place into its inclusive prefix-sum (its "map"),
// and also return the total sum of the input vector.
// Input: [ 4 2 0 5 ]
// Output: [ 4 6 6 11 ] Sum: 11
int makeInPlaceIncMapAndSum
(
IntDVec& inVec
)
{
thrust::inclusive_scan(
thrust::cuda::par( thrustAllocator ),
inVec.begin(), inVec.end(), inVec.begin() );
const int sum = inVec[ inVec.size() - 1 ];
return sum;
}
int compactIfNegative
(
DevVector<int>& inVec
)
{
inVec.erase(
thrust::remove_if(
//thrust::cuda::par( thrustAllocator ),
inVec.begin(),
inVec.end(), IsNegative() ),
inVec.end() );
return inVec.size();
}
int compactIfNegative
(
DevVector<int>& inVec,
DevVector<int>& temp
)
{
temp.resize( inVec.size() );
temp.erase(
thrust::copy_if(
thrust::cuda::par( thrustAllocator ),
inVec.begin(),
inVec.end(),
temp.begin(),
IsNotNegative() ),
temp.end() );
inVec.swap( temp );
return (int) inVec.size();
}
void compactBothIfNegative
(
IntDVec& vec0,
IntDVec& vec1
)
{
assert( ( vec0.size() == vec1.size() ) && "Vectors should be equal size!" );
const IntZipDIter newEnd =
thrust::remove_if(
//thrust::cuda::par( thrustAllocator ),
thrust::make_zip_iterator( thrust::make_tuple( vec0.begin(), vec1.begin() ) ),
thrust::make_zip_iterator( thrust::make_tuple( vec0.end(), vec1.end() ) ),
IsIntTuple2Negative() );
const IntDIterTuple2 endTuple = newEnd.get_iterator_tuple();
vec0.erase( thrust::get<0>( endTuple ), vec0.end() );
vec1.erase( thrust::get<1>( endTuple ), vec1.end() );
return;
}
int thrust_copyIf_IsActiveTetra
(
const CharDVec& inVec,
IntDVec& outVec
)
{
thrust::counting_iterator<int> first( 0 );
thrust::counting_iterator<int> last = first + inVec.size();
outVec.resize( inVec.size() );
outVec.erase(
thrust::copy_if(
thrust::cuda::par( thrustAllocator ),
first, last,
inVec.begin(),
outVec.begin(),
IsTetActive() ),
outVec.end()
);
return outVec.size();
}
int thrust_copyIf_Insertable
(
const IntDVec& stencil,
IntDVec& outVec
)
{
thrust::counting_iterator<int> first( 0 );
thrust::counting_iterator<int> last = first + stencil.size();
outVec.resize( stencil.size() );
outVec.erase(
thrust::copy_if(
thrust::cuda::par( thrustAllocator ),
first, last,
stencil.begin(),
outVec.begin(),
IsNegative() ),
outVec.end()
);
return outVec.size();
}
|
fe912a7b887cc95190761c4357a0537f8b3eb436.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include <glog/logging.h>
#include "caffe/util/math_functions.hpp"
#include "caffe/layers/weighted_softmax_loss_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SoftmaxLossForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* label,
const bool weight_by_label_freqs, const float* label_counts,
Dtype* loss, const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
} else {
loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)));
if (weight_by_label_freqs) {
loss[index] *= static_cast<Dtype>(label_counts[label_value]);
}
counts[index] = 1;
}
}
}
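// Summary of the forward pass (added): for each location (n, s) the kernel
// computes, with y = label[n,s],
//   loss[n,s] = -w[y] * log( max( prob[n, y, s], FLT_MIN ) ),
// where w[y] = label_counts[y] when weight_by_label_freqs is on (else 1);
// the host then divides the summed loss by the valid count or by outer_num_.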
template <typename Dtype>
void WeightedSoftmaxWithLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data = prob_.gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
const float* label_count_data =
weight_by_label_freqs_ ? label_counts_.gpu_data() : NULL;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SoftmaxLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, label,
weight_by_label_freqs_, label_count_data , loss_data,
outer_num_, dim, inner_num_,
has_ignore_label_, ignore_label_, counts);
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
if (normalize_) {
Dtype count;
caffe_gpu_asum(nthreads, counts, &count);
loss /= count;
} else {
loss /= outer_num_;
}
top[0]->mutable_cpu_data()[0] = loss;
if (top.size() == 2) {
top[1]->ShareData(prob_);
}
}
template <typename Dtype>
__global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top,
const Dtype* label, const bool weight_by_label_freqs,
const float* label_counts, Dtype* bottom_diff,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
const int idx = n * dim + label_value * spatial_dim + s;
bottom_diff[idx] -= 1;
if (weight_by_label_freqs) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] *= static_cast<Dtype>(label_counts[label_value]);
}
}
counts[index] = 1;
}
}
}
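// Summary of the backward pass (added): bottom_diff is pre-filled with prob,
// so after this kernel, with y = label[n,s],
//   d loss / d z[n, c, s] = w[y] * ( prob[n, c, s] - 1{c == y} ),
// and the host then scales it by loss_weight / count (or / outer_num_).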
template <typename Dtype>
void WeightedSoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is never used for anything else,
// we use it to avoid allocating new GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
const float* label_count_data =
weight_by_label_freqs_ ? label_counts_.gpu_data() : NULL;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SoftmaxLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, top_data, label,
weight_by_label_freqs_, label_count_data, bottom_diff,
outer_num_, dim, inner_num_, has_ignore_label_,
ignore_label_, counts);
const Dtype loss_weight = top[0]->cpu_diff()[0];
if (normalize_) {
Dtype count;
caffe_gpu_asum(nthreads, counts, &count);
caffe_gpu_scal(prob_.count(), loss_weight / count, bottom_diff);
} else {
caffe_gpu_scal(prob_.count(), loss_weight / outer_num_, bottom_diff);
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(WeightedSoftmaxWithLossLayer);
} // namespace caffe
|
fe912a7b887cc95190761c4357a0537f8b3eb436.cu
|
#include <algorithm>
#include <cfloat>
#include <vector>
#include <glog/logging.h>
#include "caffe/util/math_functions.hpp"
#include "caffe/layers/weighted_softmax_loss_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SoftmaxLossForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* label,
const bool weight_by_label_freqs, const float* label_counts,
Dtype* loss, const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
} else {
loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)));
if (weight_by_label_freqs) {
loss[index] *= static_cast<Dtype>(label_counts[label_value]);
}
counts[index] = 1;
}
}
}
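// Summary of the forward pass (added): for each location (n, s) the kernel
// computes, with y = label[n,s],
//   loss[n,s] = -w[y] * log( max( prob[n, y, s], FLT_MIN ) ),
// where w[y] = label_counts[y] when weight_by_label_freqs is on (else 1);
// the host then divides the summed loss by the valid count or by outer_num_.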
template <typename Dtype>
void WeightedSoftmaxWithLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data = prob_.gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
const float* label_count_data =
weight_by_label_freqs_ ? label_counts_.gpu_data() : NULL;
// NOLINT_NEXT_LINE(whitespace/operators)
SoftmaxLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, label,
weight_by_label_freqs_, label_count_data , loss_data,
outer_num_, dim, inner_num_,
has_ignore_label_, ignore_label_, counts);
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
if (normalize_) {
Dtype count;
caffe_gpu_asum(nthreads, counts, &count);
loss /= count;
} else {
loss /= outer_num_;
}
top[0]->mutable_cpu_data()[0] = loss;
if (top.size() == 2) {
top[1]->ShareData(prob_);
}
}
template <typename Dtype>
__global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top,
const Dtype* label, const bool weight_by_label_freqs,
const float* label_counts, Dtype* bottom_diff,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
const int idx = n * dim + label_value * spatial_dim + s;
bottom_diff[idx] -= 1;
if (weight_by_label_freqs) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] *= static_cast<Dtype>(label_counts[label_value]);
}
}
counts[index] = 1;
}
}
}
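// Summary of the backward pass (added): bottom_diff is pre-filled with prob,
// so after this kernel, with y = label[n,s],
//   d loss / d z[n, c, s] = w[y] * ( prob[n, c, s] - 1{c == y} ),
// and the host then scales it by loss_weight / count (or / outer_num_).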
template <typename Dtype>
void WeightedSoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is never used for anything else,
// we use it to avoid allocating new GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
const float* label_count_data =
weight_by_label_freqs_ ? label_counts_.gpu_data() : NULL;
// NOLINT_NEXT_LINE(whitespace/operators)
SoftmaxLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, top_data, label,
weight_by_label_freqs_, label_count_data, bottom_diff,
outer_num_, dim, inner_num_, has_ignore_label_,
ignore_label_, counts);
const Dtype loss_weight = top[0]->cpu_diff()[0];
if (normalize_) {
Dtype count;
caffe_gpu_asum(nthreads, counts, &count);
caffe_gpu_scal(prob_.count(), loss_weight / count, bottom_diff);
} else {
caffe_gpu_scal(prob_.count(), loss_weight / outer_num_, bottom_diff);
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(WeightedSoftmaxWithLossLayer);
} // namespace caffe
|
29fd6df32a14d88ad9269e3ab2125e11f17a7eb0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date
@precisions normal z -> s d c
@author Jakub Kurzak
@author Stan Tomov
@author Mark Gates
[zcds]gemm_fermi.cu defines the CPU driver.
[zcds]gemm_fermi_kernels.h defines the block sizes for each precision.
gemm_stencil_defs.h defines types and functions for precision-independent code.
These files are included multiple times, once for each transpose version.
gemm_stencil.cuh defines the GPU kernel (device function).
gemm_kernel.cuh defines the GPU kernel (global function).
The batched version uses gemm_kernel_batched.cuh instead of gemm_kernel.cuh.
*/
#include "magma_internal.h"
#include "commonblas_z.h"
#define PRECISION_z
#include "zgemm_fermi_kernels.h"
/***************************************************************************//**
Purpose
-------
ZGEMM performs one of the matrix-matrix operations
C = alpha*op( A )*op( B ) + beta*C,
where op( X ) is one of
op( X ) = X or
op( X ) = X**T or
op( X ) = X**H,
alpha and beta are scalars, and A, B and C are matrices, with
op( A ) an m by k matrix, op( B ) a k by n matrix and C an m by n matrix.
Parameters
----------
@param[in]
transA magma_trans_t.
On entry, transA specifies the form of op( A ) to be used in
the matrix multiplication as follows:
- = MagmaNoTrans: op( A ) = A.
- = MagmaTrans: op( A ) = A**T.
- = MagmaConjTrans: op( A ) = A**H.
@param[in]
transB magma_trans_t.
On entry, transB specifies the form of op( B ) to be used in
the matrix multiplication as follows:
- = MagmaNoTrans: op( B ) = B.
- = MagmaTrans: op( B ) = B**T.
- = MagmaConjTrans: op( B ) = B**H.
@param[in]
m INTEGER.
On entry, M specifies the number of rows of the matrix
op( dA ) and of the matrix dC. M must be at least zero.
@param[in]
n INTEGER.
On entry, N specifies the number of columns of the matrix
op( dB ) and the number of columns of the matrix dC. N must be
at least zero.
@param[in]
k INTEGER.
On entry, K specifies the number of columns of the matrix
op( dA ) and the number of rows of the matrix op( dB ). K must
be at least zero.
@param[in]
alpha COMPLEX_16
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA COMPLEX_16 array of DIMENSION ( LDA, ka ), where ka is
k when transA = MagmaNoTrans, and is m otherwise.
Before entry with transA = MagmaNoTrans, the leading m by k
part of the array dA must contain the matrix dA, otherwise
the leading k by m part of the array dA must contain the
matrix dA.
@param[in]
ldda INTEGER.
On entry, LDA specifies the first dimension of A as declared
in the calling (sub) program. When transA = MagmaNoTrans then
LDA must be at least max( 1, m ), otherwise LDA must be at
least max( 1, k ).
@param[in]
dB COMPLEX_16 array of DIMENSION ( LDB, kb ), where kb is
n when transB = MagmaNoTrans, and is k otherwise.
Before entry with transB = MagmaNoTrans, the leading k by n
part of the array dB must contain the matrix dB, otherwise
the leading n by k part of the array dB must contain the
matrix dB.
@param[in]
lddb INTEGER.
On entry, LDB specifies the first dimension of dB as declared
in the calling (sub) program. When transB = MagmaNoTrans then
LDB must be at least max( 1, k ), otherwise LDB must be at
least max( 1, n ).
@param[in]
beta COMPLEX_16.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then dC need not be set on input.
@param[in,out]
dC COMPLEX_16 array of DIMENSION ( LDC, n ).
Before entry, the leading m by n part of the array dC must
contain the matrix dC, except when beta is zero, in which
case dC need not be set on entry.
On exit, the array dC is overwritten by the m by n matrix
( alpha*op( dA )*op( dB ) + beta*dC ).
@param[in]
lddc INTEGER.
On entry, LDC specifies the first dimension of dC as declared
in the calling (sub) program. LDC must be at least
max( 1, m ).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_gemm
*******************************************************************************/
extern "C" void
magmablas_zgemm(
magma_trans_t transA, magma_trans_t transB, magma_int_t m, magma_int_t n, magma_int_t k,
magmaDoubleComplex alpha,
magmaDoubleComplex_const_ptr dA, magma_int_t ldda,
magmaDoubleComplex_const_ptr dB, magma_int_t lddb,
magmaDoubleComplex beta,
magmaDoubleComplex_ptr dC, magma_int_t lddc,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( transA != MagmaNoTrans && transA != MagmaTrans && transA != MagmaConjTrans )
info = -1;
else if ( transB != MagmaNoTrans && transB != MagmaTrans && transB != MagmaConjTrans )
info = -2;
else if ( m < 0 )
info = -3;
else if ( n < 0 )
info = -4;
else if ( k < 0 )
info = -5;
else if ( transA == MagmaNoTrans ? ldda < m : ldda < k )
info = -8;
else if ( transB == MagmaNoTrans ? lddb < k : lddb < n )
info = -10;
else if ( lddc < m )
info = -13;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
if ( m <= 0 || n <= 0 || k <= 0 )
return;
size_t offsetA = 0;
size_t offsetB = 0;
int TransA = 2, TransB = 2;
if ( transA == MagmaTrans )
TransA = 1;
else if ( transA == MagmaNoTrans )
TransA = 0;
if ( transB == MagmaTrans )
TransB = 1;
else if ( transB == MagmaNoTrans )
TransB = 0;
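    // Encoding (added note): 0 = NoTrans, 1 = Trans, 2 = ConjTrans; the
    // (TransA, TransB) pair selects one of the nine transpose-specialized
    // kernels below (zgemm_kernel_fermi_nn ... zgemm_kernel_fermi_cc).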
    magma_int_t Am = (!TransA ? m : k);
    magma_int_t An = (!TransA ? k : m);
    magma_int_t Bm = (!TransB ? k : n);
    magma_int_t Bn = (!TransB ? n : k);
size_t sizeA = (size_t) ldda * (An - 1) + Am;
size_t sizeB = (size_t) lddb * (Bn - 1) + Bm;
size_t CUBLAS_MAX_1DBUF_SIZE = ((1 << 27) - 512);
if ( sizeA >= CUBLAS_MAX_1DBUF_SIZE ||
sizeB >= CUBLAS_MAX_1DBUF_SIZE )
{
magma_zgemm( transA, transB, m, n, k, alpha,
dA, ldda, dB, lddb,
beta, dC, lddc, queue );
return;
}
#ifdef TEXTURE_1D
// Set textures parameters
tex_ref_A.normalized = false;
tex_ref_A.filterMode = hipFilterModePoint;
tex_ref_A.addressMode[0] = hipAddressModeClamp;
tex_ref_B.normalized = false;
tex_ref_B.filterMode = hipFilterModePoint;
tex_ref_B.addressMode[0] = hipAddressModeClamp;
// Bind A and B to texture references
hipError_t err;
err = hipBindTexture(&offsetA, tex_ref_A, dA, sizeA*sizeof(magmaDoubleComplex));
if ( err != hipSuccess ) {
fprintf( stderr, "cannot bind A to texture: %s (%d)\n", hipGetErrorString(err), err );
return;
}
err = hipBindTexture(&offsetB, tex_ref_B, dB, sizeB*sizeof(magmaDoubleComplex));
if ( err != hipSuccess ) {
fprintf( stderr, "cannot bind B to texture: %s (%d)\n", hipGetErrorString(err), err );
hipUnbindTexture( tex_ref_A );
return;
}
#endif
// Set up grids
dim3 dimBlock(DIM_X, DIM_Y);
offsetA = offsetA/sizeof(dA[0]);
offsetB = offsetB/sizeof(dB[0]);
if ( TransA == 0 && TransB == 0 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_nn ),
magma_ceildiv( n, BLK_N_nn ) );
hipLaunchKernelGGL(( zgemm_kernel_fermi_nn), dim3(dimGrid), dim3(dimBlock), 0, queue->cuda_stream() ,
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 0 && TransB == 1 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_nt ),
magma_ceildiv( n, BLK_N_nt ) );
hipLaunchKernelGGL(( zgemm_kernel_fermi_nt), dim3(dimGrid), dim3(dimBlock), 0, queue->cuda_stream() ,
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 0 && TransB == 2 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_nc ),
magma_ceildiv( n, BLK_N_nc ) );
hipLaunchKernelGGL(( zgemm_kernel_fermi_nc), dim3(dimGrid), dim3(dimBlock), 0, queue->cuda_stream() ,
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 1 && TransB == 0 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_tn ),
magma_ceildiv( n, BLK_N_tn ) );
hipLaunchKernelGGL(( zgemm_kernel_fermi_tn), dim3(dimGrid), dim3(dimBlock), 0, queue->cuda_stream() ,
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 1 && TransB == 1 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_tt ),
magma_ceildiv( n, BLK_N_tt ) );
hipLaunchKernelGGL(( zgemm_kernel_fermi_tt), dim3(dimGrid), dim3(dimBlock), 0, queue->cuda_stream() ,
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 1 && TransB == 2 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_tc ),
magma_ceildiv( n, BLK_N_tc ) );
hipLaunchKernelGGL(( zgemm_kernel_fermi_tc), dim3(dimGrid), dim3(dimBlock), 0, queue->cuda_stream() ,
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 2 && TransB == 0 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_cn ),
magma_ceildiv( n, BLK_N_cn ) );
hipLaunchKernelGGL(( zgemm_kernel_fermi_cn), dim3(dimGrid), dim3(dimBlock), 0, queue->cuda_stream() ,
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 2 && TransB == 1 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_ct ),
magma_ceildiv( n, BLK_N_ct ) );
hipLaunchKernelGGL(( zgemm_kernel_fermi_ct), dim3(dimGrid), dim3(dimBlock), 0, queue->cuda_stream() ,
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 2 && TransB == 2 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_cc ),
magma_ceildiv( n, BLK_N_cc ) );
hipLaunchKernelGGL(( zgemm_kernel_fermi_cc), dim3(dimGrid), dim3(dimBlock), 0, queue->cuda_stream() ,
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
#ifdef TEXTURE_1D
hipUnbindTexture( tex_ref_A );
hipUnbindTexture( tex_ref_B );
#endif
}
|
29fd6df32a14d88ad9269e3ab2125e11f17a7eb0.cu
|
/*
-- MAGMA (version 2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date
@precisions normal z -> s d c
@author Jakub Kurzak
@author Stan Tomov
@author Mark Gates
[zcds]gemm_fermi.cu defines the CPU driver.
[zcds]gemm_fermi_kernels.h defines the block sizes for each precision.
gemm_stencil_defs.h defines types and functions for precision-independent code.
These files are included multiple times, once for each transpose version.
gemm_stencil.cuh defines the GPU kernel (device function).
gemm_kernel.cuh defines the GPU kernel (global function).
The batched version uses gemm_kernel_batched.cuh instead of gemm_kernel.cuh.
*/
#include "magma_internal.h"
#include "commonblas_z.h"
#define PRECISION_z
#include "zgemm_fermi_kernels.h"
/***************************************************************************//**
Purpose
-------
ZGEMM performs one of the matrix-matrix operations
C = alpha*op( A )*op( B ) + beta*C,
where op( X ) is one of
op( X ) = X or
op( X ) = X**T or
op( X ) = X**H,
alpha and beta are scalars, and A, B and C are matrices, with
op( A ) an m by k matrix, op( B ) a k by n matrix and C an m by n matrix.
Parameters
----------
@param[in]
transA magma_trans_t.
On entry, transA specifies the form of op( A ) to be used in
the matrix multiplication as follows:
- = MagmaNoTrans: op( A ) = A.
- = MagmaTrans: op( A ) = A**T.
- = MagmaConjTrans: op( A ) = A**H.
@param[in]
transB magma_trans_t.
On entry, transB specifies the form of op( B ) to be used in
the matrix multiplication as follows:
- = MagmaNoTrans: op( B ) = B.
- = MagmaTrans: op( B ) = B**T.
- = MagmaConjTrans: op( B ) = B**H.
@param[in]
m INTEGER.
On entry, M specifies the number of rows of the matrix
op( dA ) and of the matrix dC. M must be at least zero.
@param[in]
n INTEGER.
On entry, N specifies the number of columns of the matrix
op( dB ) and the number of columns of the matrix dC. N must be
at least zero.
@param[in]
k INTEGER.
On entry, K specifies the number of columns of the matrix
op( dA ) and the number of rows of the matrix op( dB ). K must
be at least zero.
@param[in]
alpha COMPLEX_16
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA COMPLEX_16 array of DIMENSION ( LDA, ka ), where ka is
k when transA = MagmaNoTrans, and is m otherwise.
Before entry with transA = MagmaNoTrans, the leading m by k
part of the array dA must contain the matrix dA, otherwise
the leading k by m part of the array dA must contain the
matrix dA.
@param[in]
ldda INTEGER.
On entry, LDA specifies the first dimension of A as declared
in the calling (sub) program. When transA = MagmaNoTrans then
LDA must be at least max( 1, m ), otherwise LDA must be at
least max( 1, k ).
@param[in]
dB COMPLEX_16 array of DIMENSION ( LDB, kb ), where kb is
n when transB = MagmaNoTrans, and is k otherwise.
Before entry with transB = MagmaNoTrans, the leading k by n
part of the array dB must contain the matrix dB, otherwise
the leading n by k part of the array dB must contain the
matrix dB.
@param[in]
lddb INTEGER.
On entry, LDB specifies the first dimension of dB as declared
in the calling (sub) program. When transB = MagmaNoTrans then
LDB must be at least max( 1, k ), otherwise LDB must be at
least max( 1, n ).
@param[in]
beta COMPLEX_16.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then dC need not be set on input.
@param[in,out]
dC COMPLEX_16 array of DIMENSION ( LDC, n ).
Before entry, the leading m by n part of the array dC must
contain the matrix dC, except when beta is zero, in which
case dC need not be set on entry.
On exit, the array dC is overwritten by the m by n matrix
( alpha*op( dA )*op( dB ) + beta*dC ).
@param[in]
lddc INTEGER.
On entry, LDC specifies the first dimension of dC as declared
in the calling (sub) program. LDC must be at least
max( 1, m ).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_gemm
*******************************************************************************/
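/* Usage sketch: a minimal host-side call, assuming MAGMA has been
   initialized with magma_init() and that dA, dB, dC are device arrays
   already allocated and filled; the buffer names and sizes here are
   hypothetical, not part of this source.

       magma_queue_t queue;
       magma_queue_create( 0, &queue );   // queue on device 0
       magmaDoubleComplex alpha = MAGMA_Z_MAKE( 1.0, 0.0 );
       magmaDoubleComplex beta  = MAGMA_Z_MAKE( 0.0, 0.0 );
       // C = alpha*A*B + beta*C, with A m-by-k, B k-by-n, C m-by-n
       magmablas_zgemm( MagmaNoTrans, MagmaNoTrans, m, n, k,
                        alpha, dA, ldda, dB, lddb,
                        beta,  dC, lddc, queue );
       magma_queue_sync( queue );
       magma_queue_destroy( queue );
*/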
extern "C" void
magmablas_zgemm(
magma_trans_t transA, magma_trans_t transB, magma_int_t m, magma_int_t n, magma_int_t k,
magmaDoubleComplex alpha,
magmaDoubleComplex_const_ptr dA, magma_int_t ldda,
magmaDoubleComplex_const_ptr dB, magma_int_t lddb,
magmaDoubleComplex beta,
magmaDoubleComplex_ptr dC, magma_int_t lddc,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( transA != MagmaNoTrans && transA != MagmaTrans && transA != MagmaConjTrans )
info = -1;
else if ( transB != MagmaNoTrans && transB != MagmaTrans && transB != MagmaConjTrans )
info = -2;
else if ( m < 0 )
info = -3;
else if ( n < 0 )
info = -4;
else if ( k < 0 )
info = -5;
else if ( transA == MagmaNoTrans ? ldda < m : ldda < k )
info = -8;
else if ( transB == MagmaNoTrans ? lddb < k : lddb < n )
info = -10;
else if ( lddc < m )
info = -13;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
if ( m <= 0 || n <= 0 || k <= 0 )
return;
size_t offsetA = 0;
size_t offsetB = 0;
int TransA = 2, TransB = 2;
if ( transA == MagmaTrans )
TransA = 1;
else if ( transA == MagmaNoTrans )
TransA = 0;
if ( transB == MagmaTrans )
TransB = 1;
else if ( transB == MagmaNoTrans )
TransB = 0;
magma_int_t Am = ( ! TransA ? m : k);
magma_int_t An = ( ! TransA ? k : m);
magma_int_t Bm = ( ! TransB ? k : n);
magma_int_t Bn = ( ! TransB ? n : k);
size_t sizeA = (size_t) ldda * (An - 1) + Am;
size_t sizeB = (size_t) lddb * (Bn - 1) + Bm;
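// 1-D linear textures can only address on the order of 2^27 elements;
// if either operand would not fit, fall back to the cuBLAS-backed magma_zgemm.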
size_t CUBLAS_MAX_1DBUF_SIZE = ((1 << 27) - 512);
if ( sizeA >= CUBLAS_MAX_1DBUF_SIZE ||
sizeB >= CUBLAS_MAX_1DBUF_SIZE )
{
magma_zgemm( transA, transB, m, n, k, alpha,
dA, ldda, dB, lddb,
beta, dC, lddc, queue );
return;
}
#ifdef TEXTURE_1D
// Set textures parameters
tex_ref_A.normalized = false;
tex_ref_A.filterMode = cudaFilterModePoint;
tex_ref_A.addressMode[0] = cudaAddressModeClamp;
tex_ref_B.normalized = false;
tex_ref_B.filterMode = cudaFilterModePoint;
tex_ref_B.addressMode[0] = cudaAddressModeClamp;
// Bind A and B to texture references
cudaError_t err;
err = cudaBindTexture(&offsetA, tex_ref_A, dA, sizeA*sizeof(magmaDoubleComplex));
if ( err != cudaSuccess ) {
fprintf( stderr, "cannot bind A to texture: %s (%d)\n", cudaGetErrorString(err), err );
return;
}
err = cudaBindTexture(&offsetB, tex_ref_B, dB, sizeB*sizeof(magmaDoubleComplex));
if ( err != cudaSuccess ) {
fprintf( stderr, "cannot bind B to texture: %s (%d)\n", cudaGetErrorString(err), err );
cudaUnbindTexture( tex_ref_A );
return;
}
#endif
// Set up grids
dim3 dimBlock(DIM_X, DIM_Y);
offsetA = offsetA/sizeof(dA[0]);
offsetB = offsetB/sizeof(dB[0]);
if ( TransA == 0 && TransB == 0 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_nn ),
magma_ceildiv( n, BLK_N_nn ) );
zgemm_kernel_fermi_nn<<< dimGrid, dimBlock, 0, queue->cuda_stream() >>>(
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 0 && TransB == 1 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_nt ),
magma_ceildiv( n, BLK_N_nt ) );
zgemm_kernel_fermi_nt<<< dimGrid, dimBlock, 0, queue->cuda_stream() >>>(
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 0 && TransB == 2 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_nc ),
magma_ceildiv( n, BLK_N_nc ) );
zgemm_kernel_fermi_nc<<< dimGrid, dimBlock, 0, queue->cuda_stream() >>>(
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 1 && TransB == 0 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_tn ),
magma_ceildiv( n, BLK_N_tn ) );
zgemm_kernel_fermi_tn<<< dimGrid, dimBlock, 0, queue->cuda_stream() >>>(
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 1 && TransB == 1 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_tt ),
magma_ceildiv( n, BLK_N_tt ) );
zgemm_kernel_fermi_tt<<< dimGrid, dimBlock, 0, queue->cuda_stream() >>>(
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 1 && TransB == 2 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_tc ),
magma_ceildiv( n, BLK_N_tc ) );
zgemm_kernel_fermi_tc<<< dimGrid, dimBlock, 0, queue->cuda_stream() >>>(
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 2 && TransB == 0 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_cn ),
magma_ceildiv( n, BLK_N_cn ) );
zgemm_kernel_fermi_cn<<< dimGrid, dimBlock, 0, queue->cuda_stream() >>>(
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 2 && TransB == 1 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_ct ),
magma_ceildiv( n, BLK_N_ct ) );
zgemm_kernel_fermi_ct<<< dimGrid, dimBlock, 0, queue->cuda_stream() >>>(
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 2 && TransB == 2 ) {
dim3 dimGrid( magma_ceildiv( m, BLK_M_cc ),
magma_ceildiv( n, BLK_N_cc ) );
zgemm_kernel_fermi_cc<<< dimGrid, dimBlock, 0, queue->cuda_stream() >>>(
m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta,
(int)offsetA, (int)offsetB );
}
#ifdef TEXTURE_1D
cudaUnbindTexture( tex_ref_A );
cudaUnbindTexture( tex_ref_B );
#endif
}
|
d67a51b414132c670a682e302316b26fc181c4d2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/hip/detail/KernelUtils.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/TensorUtils.h>
#include <ATen/TensorOperators.h>
#include <ATen/hip/detail/KernelUtils.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/Resize.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/binary_cross_entropy_backward_native.h>
#include <ATen/ops/binary_cross_entropy_native.h>
#include <ATen/ops/empty_like.h>
#include <ATen/ops/exp.h>
#include <ATen/ops/nll_loss_backward_native.h>
#include <ATen/ops/nll_loss_forward_native.h>
#include <ATen/ops/squeeze.h>
#endif
constexpr float EPSILON = 1e-12;
namespace {
using namespace at;
void binary_cross_entropy_backward_out_kernel(Tensor& grad_input, const Tensor& grad, const Tensor& input, const Tensor& target) {
at::TensorIterator iter = TensorIteratorConfig()
.add_output(grad_input)
.add_input(grad)
.add_input(input)
.add_input(target)
.build();
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "binary_cross_entropy_backward_out_cuda", [&]() {
at::native::gpu_kernel(iter, [] GPU_LAMBDA (
scalar_t grad_val,
scalar_t input_val,
scalar_t target_val
) -> scalar_t {
const scalar_t one = 1;
const scalar_t epsilon = EPSILON;
scalar_t grad_input_denominator = max(
(one - input_val) * input_val,
epsilon
);
return grad_val * (input_val - target_val) / grad_input_denominator;
}
);
});
}
} // namespace
namespace at::native {
Tensor binary_cross_entropy_cuda(const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
const Tensor& weight = *weight_maybe_owned;
Tensor loss = at::empty_like(input);
return at::native::binary_cross_entropy_out_cuda(
input, target, weight, reduction, loss);
}
Tensor& binary_cross_entropy_out_cuda(const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction, Tensor& loss) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
const Tensor& weight = *weight_maybe_owned;
Tensor loss_squeezed = at::squeeze(loss);
TensorIterator iter = TensorIteratorConfig()
.add_output(loss_squeezed)
.add_owned_input(at::squeeze(input))
.add_owned_input(at::squeeze(target))
.build();
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "binary_cross_entropy_out_cuda", [&]() {
gpu_kernel(iter,
[] GPU_LAMBDA (scalar_t input_val, scalar_t target_val) -> scalar_t {
const scalar_t zero = 0;
const scalar_t one = 1;
const scalar_t neg_100 = -100;
CUDA_KERNEL_ASSERT(input_val >= zero && input_val <= one);
CUDA_KERNEL_ASSERT(target_val >= zero && target_val <= one);
scalar_t log_input_val = ::log(input_val);
scalar_t log_1_minus_input_val = std::log1p(-input_val);
log_input_val = ::max(log_input_val, neg_100);
log_1_minus_input_val = ::max(log_1_minus_input_val, neg_100);
return ((target_val - one) * log_1_minus_input_val) - (target_val * log_input_val);
}
);
});
if (weight.defined()) {
loss.mul_(weight);
}
if (reduction != at::Reduction::None) {
Tensor loss_reduced;
if (reduction == at::Reduction::Mean) {
loss_reduced = loss.mean();
} else if (reduction == at::Reduction::Sum) {
loss_reduced = loss.sum();
}
loss.resize_as_(loss_reduced).copy_(loss_reduced);
}
return loss;
}
Tensor binary_cross_entropy_backward_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
const Tensor& weight = *weight_maybe_owned;
Tensor grad_input = at::empty_like(input);
return at::native::binary_cross_entropy_backward_out_cuda(
grad, input, target, weight, reduction, grad_input);
}
Tensor& binary_cross_entropy_backward_out_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction, Tensor& grad_input) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
const Tensor& weight = *weight_maybe_owned;
Tensor grad_expand = grad.expand_as(input);
binary_cross_entropy_backward_out_kernel(grad_input, grad_expand, input, target);
if (weight.defined()) {
grad_input.mul_(weight);
}
if (reduction == at::Reduction::Mean) {
grad_input.div_(input.numel());
}
return grad_input;
}
// -----------------------------------
// nll_loss
// -----------------------------------
namespace {
constexpr int NLL_LOSS_THREADS = 32;
// NOTE(crcrpar): `Byte` support was added for https://github.com/pytorch/pytorch/issues/59765.
#define AT_DISPATCH_NLL_LOSS_INDEX_TYPES(TYPE, NAME, ...) \
AT_DISPATCH_SWITCH(TYPE, NAME, \
AT_PRIVATE_CASE_TYPE_USING_HINT(at::ScalarType::Byte, index_t, __VA_ARGS__) \
AT_PRIVATE_CASE_TYPE_USING_HINT(at::ScalarType::Long, index_t, __VA_ARGS__))
template <typename scalar_t, typename index_t>
__global__ void nll_loss_forward_no_reduce_cuda_kernel(
int64_t batch_size,
PackedTensorAccessor64<scalar_t, 2> input,
index_t* target,
scalar_t* output,
scalar_t* weights,
int64_t n_classes,
int64_t ignore_index) {
CUDA_KERNEL_LOOP(index, batch_size) {
index_t cur_target = target[index];
if (cur_target == ignore_index) {
output[index] = static_cast<scalar_t>(0);
continue;
}
CUDA_KERNEL_ASSERT(cur_target >= 0 && cur_target < n_classes);
auto cur_weight =
weights != nullptr ? weights[cur_target] : static_cast<scalar_t>(1);
output[index] = -cur_weight * input[index][cur_target];
}
}
template <typename scalar_t, typename index_t>
__global__ void nll_loss_forward_reduce_cuda_kernel_1d(
scalar_t* output,
scalar_t* total_weight,
scalar_t* input,
index_t* target,
scalar_t* weights,
bool size_average,
int64_t n_classes,
int64_t ignore_index) {
CUDA_KERNEL_ASSERT(threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0);
const index_t t = *target;
if (t != ignore_index) {
CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes);
const auto cur_weight = weights != nullptr ? weights[t] : scalar_t{1};
*total_weight = cur_weight;
if (size_average) {
// If we try to normalize a zero then we return a NaN
if (cur_weight == 0) {
*output = std::numeric_limits<scalar_t>::quiet_NaN();
} else {
*output = -input[t];
}
} else {
*output = -cur_weight * input[t];
}
} else {
// If the only element was omitted, we get 0. See the discussion in
// https://github.com/pytorch/pytorch/pull/64572#issuecomment-926504162
*output = scalar_t{0};
*total_weight = scalar_t{0};
}
}
template <typename scalar_t, typename accscalar_t, typename index_t>
__global__ void nll_loss_forward_reduce_cuda_kernel_2d(
scalar_t* output,
scalar_t* total_weight,
scalar_t* input,
index_t* target,
scalar_t* weights,
bool size_average,
int64_t nframe,
int64_t ndim,
int64_t n_classes,
int64_t ignore_index) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
__shared__ accscalar_t sh_inputs[NLL_LOSS_THREADS],
acc_weight[NLL_LOSS_THREADS];
sh_inputs[threadIdx.x] = static_cast<accscalar_t>(0);
acc_weight[threadIdx.x] = static_cast<accscalar_t>(0);
for (int i = threadIdx.x; i < nframe; i += NLL_LOSS_THREADS) {
index_t t = target[i];
if (t != ignore_index) {
CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes);
scalar_t cur_weight =
weights != nullptr ? weights[t] : static_cast<scalar_t>(1);
sh_inputs[threadIdx.x] -= input[i * ndim + t] * cur_weight;
acc_weight[threadIdx.x] += cur_weight;
}
}
__syncthreads();
if (threadIdx.x == 0) {
accscalar_t output_acc = 0;
accscalar_t total_weight_acc = 0;
for (int i = 0; i < NLL_LOSS_THREADS; ++i) {
output_acc += sh_inputs[i];
total_weight_acc += acc_weight[i];
}
*total_weight = static_cast<scalar_t>(total_weight_acc);
if (size_average) {
*output = static_cast<scalar_t>(output_acc / total_weight_acc);
} else {
*output = static_cast<scalar_t>(output_acc);
}
}
}
void nll_loss_forward_out_cuda_template(
const Tensor& output,
const Tensor& total_weight,
const Tensor& input_,
const Tensor& target_,
const Tensor& weight,
int64_t reduction,
int64_t ignore_index) {
auto input = *input_.expect_contiguous();
auto target = *target_.expect_contiguous();
int64_t n_classes = input.size(-1);
int64_t n_dims = input.dim();
int64_t batch_size = n_dims == 1 ? 1 : input.size(0);
auto weight_ = weight.defined() ? weight.contiguous() : weight;
if (reduction == Reduction::None && n_dims == 2) {
at::native::resize_output(output, {batch_size});
total_weight.zero_();
if (batch_size == 0) {
// This guards against unnecessary operations and launching a CUDA kernel
// with 0 blocks.
return;
}
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"nll_loss_forward_no_reduce_cuda_kernel",
[&] {
AT_DISPATCH_NLL_LOSS_INDEX_TYPES(
target.scalar_type(),
"nll_loss_forward_no_reduce_cuda_kernel_index",
[&] {
hipLaunchKernelGGL(( nll_loss_forward_no_reduce_cuda_kernel<scalar_t, index_t>)
, dim3(at::cuda::detail::GET_BLOCKS(batch_size)),
dim3(at::cuda::detail::CUDA_NUM_THREADS),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
batch_size,
input.packed_accessor64<scalar_t, 2>(),
target.data_ptr<index_t>(),
output.data_ptr<scalar_t>(),
weight_.defined() ? weight_.data_ptr<scalar_t>()
: nullptr,
n_classes,
ignore_index);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
});
return;
}
// produce scalar outputs for the reduction case
at::native::resize_output(output, {});
total_weight.resize_({});
if (target.numel() == 0) {
// Here target (and input) have zero elements
// Mean reduction on empty tensors produces NaN. See the discussion in
// https://github.com/pytorch/pytorch/pull/64572#issuecomment-926504162
if (reduction == Reduction::Mean) {
output.fill_(std::numeric_limits<double>::quiet_NaN());
} else {
output.zero_();
}
total_weight.zero_();
return;
}
if (n_dims == 1) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"nll_loss_forward_reduce_cuda_kernel_1d",
[&] {
AT_DISPATCH_NLL_LOSS_INDEX_TYPES(
target.scalar_type(),
"nll_loss_forward_reduce_cuda_kernel_1d_index",
[&] {
hipLaunchKernelGGL(( nll_loss_forward_reduce_cuda_kernel_1d<scalar_t, index_t>)
, dim3(1), dim3(1), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
output.data_ptr<scalar_t>(),
total_weight.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
target.data_ptr<index_t>(),
weight_.defined() ? weight_.data_ptr<scalar_t>()
: nullptr,
reduction == at::Reduction::Mean,
n_classes,
ignore_index);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
});
} else if (n_dims == 2) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"nll_loss_forward_reduce_cuda_kernel_2d",
[&] {
AT_DISPATCH_NLL_LOSS_INDEX_TYPES(
target.scalar_type(),
"nll_loss_forward_reduce_cuda_kernel_2d_index",
[&] {
using accscalar_t = at::acc_type<scalar_t, /*is_cuda*/true>;
hipLaunchKernelGGL(( nll_loss_forward_reduce_cuda_kernel_2d<scalar_t, accscalar_t, index_t>)
, dim3(1),
dim3(NLL_LOSS_THREADS),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
output.data_ptr<scalar_t>(),
total_weight.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
target.data_ptr<index_t>(),
weight_.defined() ? weight_.data_ptr<scalar_t>()
: nullptr,
reduction == at::Reduction::Mean,
input.size(0),
input.size(1),
n_classes,
ignore_index);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
});
}
}
template <typename scalar_t, typename index_t>
__global__ void nll_loss_backward_no_reduce_cuda_kernel(
int batch_size,
index_t *target,
PackedTensorAccessor64<scalar_t, 1> grad_output,
PackedTensorAccessor64<scalar_t, 2> grad_input,
scalar_t *weights,
int64_t n_classes,
int64_t ignore_index) {
CUDA_KERNEL_LOOP(index, batch_size) {
index_t cur_target = target[index];
if (cur_target == ignore_index) {
continue;
}
CUDA_KERNEL_ASSERT(cur_target >= 0 && cur_target < n_classes);
scalar_t weight = weights != nullptr ? weights[cur_target] : static_cast<scalar_t>(1);
grad_input[index][cur_target] = -weight * grad_output[index];
}
}
template <typename scalar_t, typename index_t>
__global__ void nll_loss_backward_reduce_cuda_kernel_1d(
scalar_t *grad_input,
scalar_t *grad_output,
scalar_t *weights,
index_t *target,
scalar_t *total_weight,
bool size_average,
int64_t n_classes,
int64_t ignore_index
) {
const index_t t = *target;
if (t != ignore_index) {
CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes);
const auto grad = -(size_average ? *grad_output / *total_weight : *grad_output);
grad_input[t] = weights != nullptr ? weights[t] * grad : grad;
}
}
template <typename T> struct bwd_index_type { using type = T; };
template<> struct bwd_index_type<uint8_t> { using type = int; };
template<> struct bwd_index_type<int64_t> { using type = uint64_t; };
template <typename scalar_t, typename index_t>
__global__ void nll_loss_backward_reduce_cuda_kernel_2d(
scalar_t* grad_input,
scalar_t* grad_output,
index_t* target,
scalar_t* weights,
scalar_t* total_weight,
bool size_average,
int nframe,
int ndim,
int64_t n_classes,
int64_t ignore_index) {
using bwd_index_t = typename bwd_index_type<index_t>::type;
const auto grad = -(size_average ? *grad_output / *total_weight
: *grad_output);
for (int i = threadIdx.x; i < nframe; i += NLL_LOSS_THREADS) {
const index_t t = target[i];
if (t != ignore_index) {
CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes);
// NOTE(crcrpar): this index could overflow in int64_t as `t` itself can be close to the max.
const bwd_index_t index = static_cast<bwd_index_t>(i) * ndim + t;
CUDA_KERNEL_ASSERT(index >= 0);
grad_input[index] = weights != nullptr ? weights[t] * grad : grad;
}
}
}
void nll_loss_backward_out_cuda_template(
const Tensor& grad_input_,
const Tensor& grad_output_,
const Tensor& input_,
const Tensor& target_,
const Tensor& total_weight,
const Tensor& weight,
int64_t reduction,
int64_t ignore_index) {
auto target = *target_.expect_contiguous();
auto input = *input_.expect_contiguous();
auto grad_input = *grad_input_.expect_contiguous();
auto grad_output = *grad_output_.expect_contiguous();
int64_t n_dims = input.dim();
int64_t n_classes = input.size(-1);
int64_t batch_size = n_dims == 1 ? 1 : input.size(0);
auto weight_ = weight.defined() ? weight.contiguous() : weight;
if (reduction == at::Reduction::None && n_dims == 2) {
if (batch_size == 0) {
// This guards against unnecessary operations and launching a CUDA kernel with 0 blocks.
return;
}
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"nll_loss_backward_no_reduce_cuda_kernel",
[&] {
AT_DISPATCH_NLL_LOSS_INDEX_TYPES(
target.scalar_type(),
"nll_loss_backward_no_reduce_cuda_kernel_index",
[&] {
hipLaunchKernelGGL(( nll_loss_backward_no_reduce_cuda_kernel<scalar_t, index_t>)
, dim3(at::cuda::detail::GET_BLOCKS(batch_size)),
dim3(at::cuda::detail::CUDA_NUM_THREADS),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
batch_size,
target.data_ptr<index_t>(),
grad_output.packed_accessor64<scalar_t, 1>(),
grad_input.packed_accessor64<scalar_t, 2>(),
weight.defined() ? weight_.data_ptr<scalar_t>() : nullptr,
n_classes,
ignore_index);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
});
return;
}
if (n_dims == 1) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"nll_loss_backward_reduce_cuda_kernel_1d",
[&] {
AT_DISPATCH_NLL_LOSS_INDEX_TYPES(
target.scalar_type(),
"nll_loss_backward_reduce_cuda_kernel_1d_index",
[&] {
hipLaunchKernelGGL(( nll_loss_backward_reduce_cuda_kernel_1d<scalar_t, index_t>)
, dim3(1), dim3(1), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
grad_input.data_ptr<scalar_t>(),
grad_output.data_ptr<scalar_t>(),
weight.defined() ? weight_.data_ptr<scalar_t>()
: nullptr,
target.data_ptr<index_t>(),
total_weight.data_ptr<scalar_t>(),
reduction == at::Reduction::Mean,
n_classes,
ignore_index);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
});
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"nll_loss_backward_reduce_cuda_kernel_2d",
[&] {
AT_DISPATCH_NLL_LOSS_INDEX_TYPES(
target.scalar_type(),
"nll_loss_backward_reduce_cuda_kernel_2d_index",
[&] {
hipLaunchKernelGGL(( nll_loss_backward_reduce_cuda_kernel_2d<scalar_t, index_t>)
, dim3(1), dim3(NLL_LOSS_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
grad_input.data_ptr<scalar_t>(),
grad_output.data_ptr<scalar_t>(),
target.data_ptr<index_t>(),
weight.defined() ? weight_.data_ptr<scalar_t>() : nullptr,
total_weight.data_ptr<scalar_t>(),
reduction == at::Reduction::Mean,
input.size(0),
input.size(1),
n_classes,
ignore_index);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
});
}
}
#undef AT_DISPATCH_NLL_LOSS_INDEX_TYPES
} // namespace
TORCH_IMPL_FUNC(nll_loss_forward_out_cuda)
(const Tensor& self,
const Tensor& target,
const OptionalTensorRef weight_opt,
int64_t reduction,
int64_t ignore_index,
const Tensor& output,
const Tensor& total_weight) {
const Tensor& weight = weight_opt.getTensorRef();
nll_loss_forward_out_cuda_template(
output, total_weight, self, target, weight, reduction, ignore_index);
}
TORCH_IMPL_FUNC(nll_loss_backward_out_cuda)
(const Tensor& grad_output,
const Tensor& self,
const Tensor& target,
OptionalTensorRef weight_opt,
int64_t reduction,
int64_t ignore_index,
const Tensor& total_weight,
const Tensor& grad_input) {
const Tensor& weight = weight_opt.getTensorRef();
grad_input.zero_();
nll_loss_backward_out_cuda_template(
grad_input,
grad_output,
self,
target,
total_weight,
weight,
reduction,
ignore_index);
}
} // namespace at::native
|
d67a51b414132c670a682e302316b26fc181c4d2.cu
|
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/cuda/detail/KernelUtils.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/TensorUtils.h>
#include <ATen/TensorOperators.h>
#include <ATen/cuda/detail/KernelUtils.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/Resize.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/binary_cross_entropy_backward_native.h>
#include <ATen/ops/binary_cross_entropy_native.h>
#include <ATen/ops/empty_like.h>
#include <ATen/ops/exp.h>
#include <ATen/ops/nll_loss_backward_native.h>
#include <ATen/ops/nll_loss_forward_native.h>
#include <ATen/ops/squeeze.h>
#endif
constexpr float EPSILON = 1e-12;
namespace {
using namespace at;
void binary_cross_entropy_backward_out_kernel(Tensor& grad_input, const Tensor& grad, const Tensor& input, const Tensor& target) {
at::TensorIterator iter = TensorIteratorConfig()
.add_output(grad_input)
.add_input(grad)
.add_input(input)
.add_input(target)
.build();
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "binary_cross_entropy_backward_out_cuda", [&]() {
at::native::gpu_kernel(iter, [] GPU_LAMBDA (
scalar_t grad_val,
scalar_t input_val,
scalar_t target_val
) -> scalar_t {
const scalar_t one = 1;
const scalar_t epsilon = EPSILON;
scalar_t grad_input_denominator = max(
(one - input_val) * input_val,
epsilon
);
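// d/dx of -(y*log(x) + (1-y)*log(1-x)) is (x - y) / (x*(1 - x));
// the denominator is clamped at EPSILON to avoid division by zero
// when the input saturates at 0 or 1.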
return grad_val * (input_val - target_val) / grad_input_denominator;
}
);
});
}
} // namespace
namespace at::native {
Tensor binary_cross_entropy_cuda(const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
const Tensor& weight = *weight_maybe_owned;
Tensor loss = at::empty_like(input);
return at::native::binary_cross_entropy_out_cuda(
input, target, weight, reduction, loss);
}
Tensor& binary_cross_entropy_out_cuda(const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction, Tensor& loss) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
const Tensor& weight = *weight_maybe_owned;
Tensor loss_squeezed = at::squeeze(loss);
TensorIterator iter = TensorIteratorConfig()
.add_output(loss_squeezed)
.add_owned_input(at::squeeze(input))
.add_owned_input(at::squeeze(target))
.build();
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "binary_cross_entropy_out_cuda", [&]() {
gpu_kernel(iter,
[] GPU_LAMBDA (scalar_t input_val, scalar_t target_val) -> scalar_t {
const scalar_t zero = 0;
const scalar_t one = 1;
const scalar_t neg_100 = -100;
CUDA_KERNEL_ASSERT(input_val >= zero && input_val <= one);
CUDA_KERNEL_ASSERT(target_val >= zero && target_val <= one);
scalar_t log_input_val = std::log(input_val);
scalar_t log_1_minus_input_val = std::log1p(-input_val);
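// Clamp the logs at -100 so that inputs of exactly 0 or 1 yield a
// large finite loss instead of an infinite one.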
log_input_val = std::max(log_input_val, neg_100);
log_1_minus_input_val = std::max(log_1_minus_input_val, neg_100);
return ((target_val - one) * log_1_minus_input_val) - (target_val * log_input_val);
}
);
});
if (weight.defined()) {
loss.mul_(weight);
}
if (reduction != at::Reduction::None) {
Tensor loss_reduced;
if (reduction == at::Reduction::Mean) {
loss_reduced = loss.mean();
} else if (reduction == at::Reduction::Sum) {
loss_reduced = loss.sum();
}
loss.resize_as_(loss_reduced).copy_(loss_reduced);
}
return loss;
}
Tensor binary_cross_entropy_backward_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
const Tensor& weight = *weight_maybe_owned;
Tensor grad_input = at::empty_like(input);
return at::native::binary_cross_entropy_backward_out_cuda(
grad, input, target, weight, reduction, grad_input);
}
Tensor& binary_cross_entropy_backward_out_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction, Tensor& grad_input) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
const Tensor& weight = *weight_maybe_owned;
Tensor grad_expand = grad.expand_as(input);
binary_cross_entropy_backward_out_kernel(grad_input, grad_expand, input, target);
if (weight.defined()) {
grad_input.mul_(weight);
}
if (reduction == at::Reduction::Mean) {
grad_input.div_(input.numel());
}
return grad_input;
}
// -----------------------------------
// nll_loss
// -----------------------------------
namespace {
constexpr int NLL_LOSS_THREADS = 32;
// NOTE(crcrpar): `Byte` support was added for https://github.com/pytorch/pytorch/issues/59765.
#define AT_DISPATCH_NLL_LOSS_INDEX_TYPES(TYPE, NAME, ...) \
AT_DISPATCH_SWITCH(TYPE, NAME, \
AT_PRIVATE_CASE_TYPE_USING_HINT(at::ScalarType::Byte, index_t, __VA_ARGS__) \
AT_PRIVATE_CASE_TYPE_USING_HINT(at::ScalarType::Long, index_t, __VA_ARGS__))
template <typename scalar_t, typename index_t>
__global__ void nll_loss_forward_no_reduce_cuda_kernel(
int64_t batch_size,
PackedTensorAccessor64<scalar_t, 2> input,
index_t* target,
scalar_t* output,
scalar_t* weights,
int64_t n_classes,
int64_t ignore_index) {
CUDA_KERNEL_LOOP(index, batch_size) {
index_t cur_target = target[index];
if (cur_target == ignore_index) {
output[index] = static_cast<scalar_t>(0);
continue;
}
CUDA_KERNEL_ASSERT(cur_target >= 0 && cur_target < n_classes);
auto cur_weight =
weights != nullptr ? weights[cur_target] : static_cast<scalar_t>(1);
output[index] = -cur_weight * input[index][cur_target];
}
}
template <typename scalar_t, typename index_t>
__global__ void nll_loss_forward_reduce_cuda_kernel_1d(
scalar_t* output,
scalar_t* total_weight,
scalar_t* input,
index_t* target,
scalar_t* weights,
bool size_average,
int64_t n_classes,
int64_t ignore_index) {
CUDA_KERNEL_ASSERT(threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0);
const index_t t = *target;
if (t != ignore_index) {
CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes);
const auto cur_weight = weights != nullptr ? weights[t] : scalar_t{1};
*total_weight = cur_weight;
if (size_average) {
// If we try to normalize a zero then we return a NaN
if (cur_weight == 0) {
*output = std::numeric_limits<scalar_t>::quiet_NaN();
} else {
*output = -input[t];
}
} else {
*output = -cur_weight * input[t];
}
} else {
// If the only element was omitted, we get 0. See the discussion in
// https://github.com/pytorch/pytorch/pull/64572#issuecomment-926504162
*output = scalar_t{0};
*total_weight = scalar_t{0};
}
}
template <typename scalar_t, typename accscalar_t, typename index_t>
__global__ void nll_loss_forward_reduce_cuda_kernel_2d(
scalar_t* output,
scalar_t* total_weight,
scalar_t* input,
index_t* target,
scalar_t* weights,
bool size_average,
int64_t nframe,
int64_t ndim,
int64_t n_classes,
int64_t ignore_index) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
__shared__ accscalar_t sh_inputs[NLL_LOSS_THREADS],
acc_weight[NLL_LOSS_THREADS];
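// Strided block reduction: each of the NLL_LOSS_THREADS threads accumulates
// a partial (negated) loss and a partial weight over its slice of the batch;
// thread 0 then folds the per-thread partials into the scalar outputs.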
sh_inputs[threadIdx.x] = static_cast<accscalar_t>(0);
acc_weight[threadIdx.x] = static_cast<accscalar_t>(0);
for (int i = threadIdx.x; i < nframe; i += NLL_LOSS_THREADS) {
index_t t = target[i];
if (t != ignore_index) {
CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes);
scalar_t cur_weight =
weights != nullptr ? weights[t] : static_cast<scalar_t>(1);
sh_inputs[threadIdx.x] -= input[i * ndim + t] * cur_weight;
acc_weight[threadIdx.x] += cur_weight;
}
}
__syncthreads();
if (threadIdx.x == 0) {
accscalar_t output_acc = 0;
accscalar_t total_weight_acc = 0;
for (int i = 0; i < NLL_LOSS_THREADS; ++i) {
output_acc += sh_inputs[i];
total_weight_acc += acc_weight[i];
}
*total_weight = static_cast<scalar_t>(total_weight_acc);
if (size_average) {
*output = static_cast<scalar_t>(output_acc / total_weight_acc);
} else {
*output = static_cast<scalar_t>(output_acc);
}
}
}
void nll_loss_forward_out_cuda_template(
const Tensor& output,
const Tensor& total_weight,
const Tensor& input_,
const Tensor& target_,
const Tensor& weight,
int64_t reduction,
int64_t ignore_index) {
auto input = *input_.expect_contiguous();
auto target = *target_.expect_contiguous();
int64_t n_classes = input.size(-1);
int64_t n_dims = input.dim();
int64_t batch_size = n_dims == 1 ? 1 : input.size(0);
auto weight_ = weight.defined() ? weight.contiguous() : weight;
if (reduction == Reduction::None && n_dims == 2) {
at::native::resize_output(output, {batch_size});
total_weight.zero_();
if (batch_size == 0) {
// This guards against unnecessary operations and launching a CUDA kernel
// with 0 blocks.
return;
}
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"nll_loss_forward_no_reduce_cuda_kernel",
[&] {
AT_DISPATCH_NLL_LOSS_INDEX_TYPES(
target.scalar_type(),
"nll_loss_forward_no_reduce_cuda_kernel_index",
[&] {
nll_loss_forward_no_reduce_cuda_kernel<scalar_t, index_t>
<<<at::cuda::detail::GET_BLOCKS(batch_size),
at::cuda::detail::CUDA_NUM_THREADS,
0,
at::cuda::getCurrentCUDAStream()>>>(
batch_size,
input.packed_accessor64<scalar_t, 2>(),
target.data_ptr<index_t>(),
output.data_ptr<scalar_t>(),
weight_.defined() ? weight_.data_ptr<scalar_t>()
: nullptr,
n_classes,
ignore_index);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
});
return;
}
// produce scalar outputs for the reduction case
at::native::resize_output(output, {});
total_weight.resize_({});
if (target.numel() == 0) {
// Here target (and input) have zero elements
// Mean reduction on empty tensors produces NaN. See the discussion in
// https://github.com/pytorch/pytorch/pull/64572#issuecomment-926504162
if (reduction == Reduction::Mean) {
output.fill_(std::numeric_limits<double>::quiet_NaN());
} else {
output.zero_();
}
total_weight.zero_();
return;
}
if (n_dims == 1) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"nll_loss_forward_reduce_cuda_kernel_1d",
[&] {
AT_DISPATCH_NLL_LOSS_INDEX_TYPES(
target.scalar_type(),
"nll_loss_forward_reduce_cuda_kernel_1d_index",
[&] {
nll_loss_forward_reduce_cuda_kernel_1d<scalar_t, index_t>
<<<1, 1, 0, at::cuda::getCurrentCUDAStream()>>>(
output.data_ptr<scalar_t>(),
total_weight.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
target.data_ptr<index_t>(),
weight_.defined() ? weight_.data_ptr<scalar_t>()
: nullptr,
reduction == at::Reduction::Mean,
n_classes,
ignore_index);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
});
} else if (n_dims == 2) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"nll_loss_forward_reduce_cuda_kernel_2d",
[&] {
AT_DISPATCH_NLL_LOSS_INDEX_TYPES(
target.scalar_type(),
"nll_loss_forward_reduce_cuda_kernel_2d_index",
[&] {
using accscalar_t = at::acc_type<scalar_t, /*is_cuda*/true>;
nll_loss_forward_reduce_cuda_kernel_2d<scalar_t, accscalar_t, index_t>
<<<1,
NLL_LOSS_THREADS,
0,
at::cuda::getCurrentCUDAStream()>>>(
output.data_ptr<scalar_t>(),
total_weight.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
target.data_ptr<index_t>(),
weight_.defined() ? weight_.data_ptr<scalar_t>()
: nullptr,
reduction == at::Reduction::Mean,
input.size(0),
input.size(1),
n_classes,
ignore_index);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
});
}
}
template <typename scalar_t, typename index_t>
__global__ void nll_loss_backward_no_reduce_cuda_kernel(
int batch_size,
index_t *target,
PackedTensorAccessor64<scalar_t, 1> grad_output,
PackedTensorAccessor64<scalar_t, 2> grad_input,
scalar_t *weights,
int64_t n_classes,
int64_t ignore_index) {
CUDA_KERNEL_LOOP(index, batch_size) {
index_t cur_target = target[index];
if (cur_target == ignore_index) {
continue;
}
CUDA_KERNEL_ASSERT(cur_target >= 0 && cur_target < n_classes);
scalar_t weight = weights != nullptr ? weights[cur_target] : static_cast<scalar_t>(1);
grad_input[index][cur_target] = -weight * grad_output[index];
}
}
template <typename scalar_t, typename index_t>
__global__ void nll_loss_backward_reduce_cuda_kernel_1d(
scalar_t *grad_input,
scalar_t *grad_output,
scalar_t *weights,
index_t *target,
scalar_t *total_weight,
bool size_average,
int64_t n_classes,
int64_t ignore_index
) {
const index_t t = *target;
if (t != ignore_index) {
CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes);
const auto grad = -(size_average ? *grad_output / *total_weight : *grad_output);
grad_input[t] = weights != nullptr ? weights[t] * grad : grad;
}
}
template <typename T> struct bwd_index_type { using type = T; };
template<> struct bwd_index_type<uint8_t> { using type = int; };
template<> struct bwd_index_type<int64_t> { using type = uint64_t; };
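// Maps the target index type to one safe for computing flat offsets in the
// backward 2-d kernel: int64_t targets use uint64_t so i * ndim + t cannot
// overflow the signed range (see the NOTE inside the kernel below).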
template <typename scalar_t, typename index_t>
__global__ void nll_loss_backward_reduce_cuda_kernel_2d(
scalar_t* grad_input,
scalar_t* grad_output,
index_t* target,
scalar_t* weights,
scalar_t* total_weight,
bool size_average,
int nframe,
int ndim,
int64_t n_classes,
int64_t ignore_index) {
using bwd_index_t = typename bwd_index_type<index_t>::type;
const auto grad = -(size_average ? *grad_output / *total_weight
: *grad_output);
for (int i = threadIdx.x; i < nframe; i += NLL_LOSS_THREADS) {
const index_t t = target[i];
if (t != ignore_index) {
CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes);
// NOTE(crcrpar): this index could overflow in int64_t as `t` itself can be close to the max.
const bwd_index_t index = static_cast<bwd_index_t>(i) * ndim + t;
CUDA_KERNEL_ASSERT(index >= 0);
grad_input[index] = weights != nullptr ? weights[t] * grad : grad;
}
}
}
void nll_loss_backward_out_cuda_template(
const Tensor& grad_input_,
const Tensor& grad_output_,
const Tensor& input_,
const Tensor& target_,
const Tensor& total_weight,
const Tensor& weight,
int64_t reduction,
int64_t ignore_index) {
auto target = *target_.expect_contiguous();
auto input = *input_.expect_contiguous();
auto grad_input = *grad_input_.expect_contiguous();
auto grad_output = *grad_output_.expect_contiguous();
int64_t n_dims = input.dim();
int64_t n_classes = input.size(-1);
int64_t batch_size = n_dims == 1 ? 1 : input.size(0);
auto weight_ = weight.defined() ? weight.contiguous() : weight;
if (reduction == at::Reduction::None && n_dims == 2) {
if (batch_size == 0) {
// This guards against unnecessary operations and launching a CUDA kernel with 0 blocks.
return;
}
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"nll_loss_backward_no_reduce_cuda_kernel",
[&] {
AT_DISPATCH_NLL_LOSS_INDEX_TYPES(
target.scalar_type(),
"nll_loss_backward_no_reduce_cuda_kernel_index",
[&] {
nll_loss_backward_no_reduce_cuda_kernel<scalar_t, index_t>
<<<at::cuda::detail::GET_BLOCKS(batch_size),
at::cuda::detail::CUDA_NUM_THREADS,
0,
at::cuda::getCurrentCUDAStream()>>>(
batch_size,
target.data_ptr<index_t>(),
grad_output.packed_accessor64<scalar_t, 1>(),
grad_input.packed_accessor64<scalar_t, 2>(),
weight.defined() ? weight_.data_ptr<scalar_t>() : nullptr,
n_classes,
ignore_index);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
});
return;
}
if (n_dims == 1) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"nll_loss_backward_reduce_cuda_kernel_1d",
[&] {
AT_DISPATCH_NLL_LOSS_INDEX_TYPES(
target.scalar_type(),
"nll_loss_backward_reduce_cuda_kernel_1d_index",
[&] {
nll_loss_backward_reduce_cuda_kernel_1d<scalar_t, index_t>
<<<1, 1, 0, at::cuda::getCurrentCUDAStream()>>>(
grad_input.data_ptr<scalar_t>(),
grad_output.data_ptr<scalar_t>(),
weight.defined() ? weight_.data_ptr<scalar_t>()
: nullptr,
target.data_ptr<index_t>(),
total_weight.data_ptr<scalar_t>(),
reduction == at::Reduction::Mean,
n_classes,
ignore_index);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
});
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"nll_loss_backward_reduce_cuda_kernel_2d",
[&] {
AT_DISPATCH_NLL_LOSS_INDEX_TYPES(
target.scalar_type(),
"nll_loss_backward_reduce_cuda_kernel_2d_index",
[&] {
nll_loss_backward_reduce_cuda_kernel_2d<scalar_t, index_t>
<<<1, NLL_LOSS_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>(
grad_input.data_ptr<scalar_t>(),
grad_output.data_ptr<scalar_t>(),
target.data_ptr<index_t>(),
weight.defined() ? weight_.data_ptr<scalar_t>() : nullptr,
total_weight.data_ptr<scalar_t>(),
reduction == at::Reduction::Mean,
input.size(0),
input.size(1),
n_classes,
ignore_index);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
});
}
}
#undef AT_DISPATCH_NLL_LOSS_INDEX_TYPES
} // namespace
TORCH_IMPL_FUNC(nll_loss_forward_out_cuda)
(const Tensor& self,
const Tensor& target,
const OptionalTensorRef weight_opt,
int64_t reduction,
int64_t ignore_index,
const Tensor& output,
const Tensor& total_weight) {
const Tensor& weight = weight_opt.getTensorRef();
nll_loss_forward_out_cuda_template(
output, total_weight, self, target, weight, reduction, ignore_index);
}
TORCH_IMPL_FUNC(nll_loss_backward_out_cuda)
(const Tensor& grad_output,
const Tensor& self,
const Tensor& target,
OptionalTensorRef weight_opt,
int64_t reduction,
int64_t ignore_index,
const Tensor& total_weight,
const Tensor& grad_input) {
const Tensor& weight = weight_opt.getTensorRef();
grad_input.zero_();
nll_loss_backward_out_cuda_template(
grad_input,
grad_output,
self,
target,
total_weight,
weight,
reduction,
ignore_index);
}
} // namespace at::native
|
9b71c019b53248564ca84f58f4dd2812c272bbae.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ATen/native/TensorTransformations.h"
#include "ATen/hip/detail/IndexUtils.cuh"
#include "ATen/NativeFunctions.h"
#include <cstddef>
#include <vector>
namespace at {
namespace native {
#define AT_APPLY_THREADS_PER_BLOCK (32 * 16)
#define AT_APPLY_BLOCKS_PER_SM 4
template <typename scalar_t, typename IndexType>
#if __CUDA_ARCH__ >= 350
__launch_bounds__(AT_APPLY_THREADS_PER_BLOCK, AT_APPLY_BLOCKS_PER_SM)
#endif
__global__ void
kernel_pointwise_flip_apply2(const cuda::detail::TensorInfo<scalar_t, IndexType> in_tensor_info,
cuda::detail::TensorInfo<scalar_t, IndexType> out_tensor_info,
IndexType N,
int flip_dim,
IndexType total_dims) {
for (IndexType linear_index = blockIdx.x * blockDim.x + threadIdx.x; linear_index < N; linear_index += gridDim.x * blockDim.x) {
IndexType dst_offset = 0;
if (flip_dim == 0) {
// flip 1st dim
dst_offset = (in_tensor_info.sizes[0] - 1 - linear_index / in_tensor_info.strides[0]) * in_tensor_info.strides[0] + linear_index % in_tensor_info.strides[0];
}
else {
// flip last dim
IndexType i = total_dims - 1;
dst_offset = linear_index / in_tensor_info.strides[0] * in_tensor_info.strides[0] + (in_tensor_info.sizes[i] - 1 - linear_index % in_tensor_info.strides[0]);
}
out_tensor_info.data[dst_offset] = in_tensor_info.data[linear_index];
}
}
template <typename scalar_t>
__global__
void flip_cuda_kernel(scalar_t* in_tensor, scalar_t* out_tensor, int64_t N, int64_t* flip_dims, int64_t flip_dims_size,
int64_t* strides, int64_t* strides_contiguous, int64_t* shape, int64_t total_dims) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index >= N) {
return;
}
int64_t cur_indices = linear_index, rem = 0, dst_offset = 0;
for (int64_t i = 0; i < total_dims; i++) {
int64_t temp = cur_indices;
cur_indices = cur_indices / strides_contiguous[i];
rem = temp - cur_indices * strides_contiguous[i];
// flip the indices if it is in flip_dims
for (int64_t j = 0; j < flip_dims_size; j++) {
if (i == flip_dims[j]) {
cur_indices = shape[i] - 1 - cur_indices;
}
}
dst_offset += cur_indices * strides[i];
cur_indices = rem;
}
out_tensor[linear_index] = in_tensor[dst_offset];
}
// Flip tensor given a list of dims
Tensor flip_cuda(const Tensor& self, IntList dims) {
auto in_tensor = self;
const int64_t flip_dims_size = dims.size(), total_dims = in_tensor.dim(), N = in_tensor.numel();
check_errors(total_dims, flip_dims_size, dims);
int64_t block_size = 512;
dim3 dim_block(block_size);
dim3 dim_grid((N + block_size - 1) / block_size);
// use kernel_pointwise_flip_apply2 only when the dim to flip is the first or last one, where collapseDims can reduce the amount of work
if (flip_dims_size == 1 && in_tensor.is_contiguous() && (dims[0] == 0 || dims[0] == total_dims - 1)) {
auto out_tensor = at::empty_like(self);
AT_DISPATCH_ALL_TYPES_AND_HALF(in_tensor.type(), "flip_cuda", [&] {
auto in_tensor_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(in_tensor);
auto out_tensor_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(out_tensor);
int flip_dim = in_tensor_info.collapseDims(dims[0]);
out_tensor_info.collapseDims(dims[0]);
hipLaunchKernelGGL(( kernel_pointwise_flip_apply2<scalar_t, int64_t>)
, dim3(dim_grid), dim3(dim_block), 0, globalContext().getCurrentHIPStreamMasqueradingAsCUDA(),
in_tensor_info, out_tensor_info, N, flip_dim, total_dims);
});
return out_tensor;
}
auto flip_dims = std::vector<int64_t>(dims);
auto flip_dims_t = at::CPU(kLong).tensorFromBlob(flip_dims.data(), {static_cast<int64_t>(flip_dims.size())});
auto shape = std::vector<int64_t>(in_tensor.sizes());
auto shape_t = at::CPU(kLong).tensorFromBlob(shape.data(), {static_cast<int64_t>(shape.size())});
auto strides = std::vector<int64_t>(in_tensor.strides());
auto strides_t = at::CPU(kLong).tensorFromBlob(strides.data(), {static_cast<int64_t>(strides.size())});
auto out_tensor = at::empty_like(in_tensor);
// stride_contiguous holds the strides the tensor would have after calling contiguous();
// it is used to compute the multi-dimensional indices of each element in the non-contiguous tensor
Tensor stride_contiguous = at::zeros({total_dims}, kLong);
int64_t* stride_contiguous_d = stride_contiguous.data<int64_t>();
for (int64_t i = total_dims - 1; i >= 0; i--) {
if (i == total_dims - 1) {
stride_contiguous_d[i] = 1;
} else {
stride_contiguous_d[i] = std::max<int64_t>(shape[i+1], 1) * stride_contiguous_d[i + 1];
}
}
AT_DISPATCH_ALL_TYPES_AND_HALF(in_tensor.type(), "flip_cuda", [&] {
hipLaunchKernelGGL(( flip_cuda_kernel), dim3(dim_grid), dim3(dim_block), 0, globalContext().getCurrentHIPStreamMasqueradingAsCUDA(),
in_tensor.data<scalar_t>(), out_tensor.data<scalar_t>(), N, flip_dims_t.toType(CUDA(kLong)).data<int64_t>(), flip_dims_size,
strides_t.toType(CUDA(kLong)).data<int64_t>(), stride_contiguous.toType(CUDA(kLong)).data<int64_t>(), shape_t.toType(CUDA(kLong)).data<int64_t>(), total_dims);
});
return out_tensor;
}
}} // namespace at::native
|
9b71c019b53248564ca84f58f4dd2812c272bbae.cu
|
#include "ATen/native/TensorTransformations.h"
#include "ATen/cuda/detail/IndexUtils.cuh"
#include "ATen/NativeFunctions.h"
#include <cstddef>
#include <vector>
namespace at {
namespace native {
#define AT_APPLY_THREADS_PER_BLOCK (32 * 16)
#define AT_APPLY_BLOCKS_PER_SM 4
template <typename scalar_t, typename IndexType>
#if __CUDA_ARCH__ >= 350
__launch_bounds__(AT_APPLY_THREADS_PER_BLOCK, AT_APPLY_BLOCKS_PER_SM)
#endif
__global__ void
kernel_pointwise_flip_apply2(const cuda::detail::TensorInfo<scalar_t, IndexType> in_tensor_info,
cuda::detail::TensorInfo<scalar_t, IndexType> out_tensor_info,
IndexType N,
int flip_dim,
IndexType total_dims) {
for (IndexType linear_index = blockIdx.x * blockDim.x + threadIdx.x; linear_index < N; linear_index += gridDim.x * blockDim.x) {
IndexType dst_offset = 0;
if (flip_dim == 0) {
// flip 1st dim
dst_offset = (in_tensor_info.sizes[0] - 1 - linear_index / in_tensor_info.strides[0]) * in_tensor_info.strides[0] + linear_index % in_tensor_info.strides[0];
}
else {
// flip last dim
IndexType i = total_dims - 1;
dst_offset = linear_index / in_tensor_info.strides[0] * in_tensor_info.strides[0] + (in_tensor_info.sizes[i] - 1 - linear_index % in_tensor_info.strides[0]);
}
out_tensor_info.data[dst_offset] = in_tensor_info.data[linear_index];
}
}
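// Each thread handles one output element: the linear index is decomposed into
// multi-dimensional coordinates via the contiguous strides, coordinates along
// flipped dims are mirrored to shape[i] - 1 - idx, and the source offset is
// rebuilt from the input's real strides.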
template <typename scalar_t>
__global__
void flip_cuda_kernel(scalar_t* in_tensor, scalar_t* out_tensor, int64_t N, int64_t* flip_dims, int64_t flip_dims_size,
int64_t* strides, int64_t* strides_contiguous, int64_t* shape, int64_t total_dims) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index >= N) {
return;
}
int64_t cur_indices = linear_index, rem = 0, dst_offset = 0;
for (int64_t i = 0; i < total_dims; i++) {
int64_t temp = cur_indices;
cur_indices = cur_indices / strides_contiguous[i];
rem = temp - cur_indices * strides_contiguous[i];
// flip the indices if it is in flip_dims
for (int64_t j = 0; j < flip_dims_size; j++) {
if (i == flip_dims[j]) {
cur_indices = shape[i] - 1 - cur_indices;
}
}
dst_offset += cur_indices * strides[i];
cur_indices = rem;
}
out_tensor[linear_index] = in_tensor[dst_offset];
}
// Flip tensor given a list of dims
Tensor flip_cuda(const Tensor& self, IntList dims) {
auto in_tensor = self;
const int64_t flip_dims_size = dims.size(), total_dims = in_tensor.dim(), N = in_tensor.numel();
check_errors(total_dims, flip_dims_size, dims);
int64_t block_size = 512;
dim3 dim_block(block_size);
dim3 dim_grid((N + block_size - 1) / block_size);
// use kernel_pointwise_flip_apply2 only when the dim to flip is the first or last one, where collapseDims can reduce the amount of work
if (flip_dims_size == 1 && in_tensor.is_contiguous() && (dims[0] == 0 || dims[0] == total_dims - 1)) {
auto out_tensor = at::empty_like(self);
AT_DISPATCH_ALL_TYPES_AND_HALF(in_tensor.type(), "flip_cuda", [&] {
auto in_tensor_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(in_tensor);
auto out_tensor_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(out_tensor);
int flip_dim = in_tensor_info.collapseDims(dims[0]);
out_tensor_info.collapseDims(dims[0]);
kernel_pointwise_flip_apply2<scalar_t, int64_t>
<<<dim_grid, dim_block, 0, globalContext().getCurrentCUDAStream()>>>(
in_tensor_info, out_tensor_info, N, flip_dim, total_dims);
});
return out_tensor;
}
auto flip_dims = std::vector<int64_t>(dims);
auto flip_dims_t = at::CPU(kLong).tensorFromBlob(flip_dims.data(), {static_cast<int64_t>(flip_dims.size())});
auto shape = std::vector<int64_t>(in_tensor.sizes());
auto shape_t = at::CPU(kLong).tensorFromBlob(shape.data(), {static_cast<int64_t>(shape.size())});
auto strides = std::vector<int64_t>(in_tensor.strides());
auto strides_t = at::CPU(kLong).tensorFromBlob(strides.data(), {static_cast<int64_t>(strides.size())});
auto out_tensor = at::empty_like(in_tensor);
// stride_contiguous holds the strides the tensor would have after calling contiguous();
// it is used to compute the multi-dimensional indices of each element in the non-contiguous tensor
Tensor stride_contiguous = at::zeros({total_dims}, kLong);
int64_t* stride_contiguous_d = stride_contiguous.data<int64_t>();
for (int64_t i = total_dims - 1; i >= 0; i--) {
if (i == total_dims - 1) {
stride_contiguous_d[i] = 1;
} else {
stride_contiguous_d[i] = std::max<int64_t>(shape[i+1], 1) * stride_contiguous_d[i + 1];
}
}
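// e.g. for shape {2, 3, 4} this loop yields contiguous strides {12, 4, 1}.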
AT_DISPATCH_ALL_TYPES_AND_HALF(in_tensor.type(), "flip_cuda", [&] {
flip_cuda_kernel<<<dim_grid, dim_block, 0, globalContext().getCurrentCUDAStream()>>>(
in_tensor.data<scalar_t>(), out_tensor.data<scalar_t>(), N, flip_dims_t.toType(CUDA(kLong)).data<int64_t>(), flip_dims_size,
strides_t.toType(CUDA(kLong)).data<int64_t>(), stride_contiguous.toType(CUDA(kLong)).data<int64_t>(), shape_t.toType(CUDA(kLong)).data<int64_t>(), total_dims);
});
return out_tensor;
}
}} // namespace at::native
|
afb4c41a2b31073039d5234b0c0f5c95544f8be7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/phi/kernels/funcs/sequence_scale.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_primitives.h"
namespace phi {
namespace funcs {
using phi::PADDLE_CUDA_NUM_THREADS;
template <typename T, int BlockSize>
__global__ void SequenceScaleKernel(T* seq,
size_t* lod,
const T* scales,
const size_t seq_width) {
for (int i = threadIdx.x;
i < (lod[blockIdx.x + 1] - lod[blockIdx.x]) * seq_width;
i += BlockSize) {
int idx = lod[blockIdx.x] * seq_width + i;
seq[idx] *= scales[blockIdx.x];
}
}
template <typename T>
class ScaleLoDTensorFunctor<phi::GPUContext, T> {
public:
void operator()(const phi::GPUContext& context,
const T* scales,
phi::DenseTensor* seq) {
const size_t level = 0;
auto lod = seq->lod();
const size_t num_seq = lod[level].size() - 1;
const size_t seq_width = seq->numel() / seq->dims()[0];
auto abs_offset_lod = paddle::framework::ToAbsOffset(lod);
T* seq_data = context.template Alloc<T>(seq);
paddle::framework::MixVector<size_t> mix_vector(&(abs_offset_lod[level]));
#ifdef PADDLE_WITH_HIP
hipLaunchKernelGGL(
HIP_KERNEL_NAME(SequenceScaleKernel<T, PADDLE_CUDA_NUM_THREADS>),
dim3(num_seq),
dim3(PADDLE_CUDA_NUM_THREADS),
0,
context.stream(),
seq_data,
mix_vector.CUDAMutableData(context.GetPlace()),
scales,
seq_width);
#else
hipLaunchKernelGGL(( SequenceScaleKernel<T, PADDLE_CUDA_NUM_THREADS>)
, dim3(num_seq), dim3(PADDLE_CUDA_NUM_THREADS), 0, context.stream(),
seq_data,
mix_vector.CUDAMutableData(context.GetPlace()),
scales,
seq_width);
#endif
mix_vector.CopyToCPU();
}
};
template class ScaleLoDTensorFunctor<phi::GPUContext, float>;
template class ScaleLoDTensorFunctor<phi::GPUContext, double>;
} // namespace funcs
} // namespace phi
|
afb4c41a2b31073039d5234b0c0f5c95544f8be7.cu
|
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/phi/kernels/funcs/sequence_scale.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_primitives.h"
namespace phi {
namespace funcs {
using phi::PADDLE_CUDA_NUM_THREADS;
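// One block per sequence: the threads of block b stride over the
// lod-delimited elements of sequence b (times seq_width features) and
// multiply each entry by scales[b].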
template <typename T, int BlockSize>
__global__ void SequenceScaleKernel(T* seq,
size_t* lod,
const T* scales,
const size_t seq_width) {
for (int i = threadIdx.x;
i < (lod[blockIdx.x + 1] - lod[blockIdx.x]) * seq_width;
i += BlockSize) {
int idx = lod[blockIdx.x] * seq_width + i;
seq[idx] *= scales[blockIdx.x];
}
}
template <typename T>
class ScaleLoDTensorFunctor<phi::GPUContext, T> {
public:
void operator()(const phi::GPUContext& context,
const T* scales,
phi::DenseTensor* seq) {
const size_t level = 0;
auto lod = seq->lod();
const size_t num_seq = lod[level].size() - 1;
const size_t seq_width = seq->numel() / seq->dims()[0];
auto abs_offset_lod = paddle::framework::ToAbsOffset(lod);
T* seq_data = context.template Alloc<T>(seq);
paddle::framework::MixVector<size_t> mix_vector(&(abs_offset_lod[level]));
#ifdef PADDLE_WITH_HIP
hipLaunchKernelGGL(
HIP_KERNEL_NAME(SequenceScaleKernel<T, PADDLE_CUDA_NUM_THREADS>),
dim3(num_seq),
dim3(PADDLE_CUDA_NUM_THREADS),
0,
context.stream(),
seq_data,
mix_vector.CUDAMutableData(context.GetPlace()),
scales,
seq_width);
#else
SequenceScaleKernel<T, PADDLE_CUDA_NUM_THREADS>
<<<num_seq, PADDLE_CUDA_NUM_THREADS, 0, context.stream()>>>(
seq_data,
mix_vector.CUDAMutableData(context.GetPlace()),
scales,
seq_width);
#endif
mix_vector.CopyToCPU();
}
};
template class ScaleLoDTensorFunctor<phi::GPUContext, float>;
template class ScaleLoDTensorFunctor<phi::GPUContext, double>;
} // namespace funcs
} // namespace phi
|
436ef203076916bbdf61f014674d1b89a26a15c5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**********HEADERS**********/
#include <vector>
#include <algorithm>
#include <iostream>
#include <iomanip>
#include <string>
#include <limits>
#include <stdlib.h>
#include <fstream>
#include <math.h>
#include <time.h>
#include "cuda_ptr.cuh"
#include "mimo-io.cuh"
using namespace std;
/**********DEFINING CONSTANTS***********/
#define NX 192 //was 201
#define NY 192 //was 201
#define NT 401
#define NS 640 //number of sensors
#define HX 0.001f
#define HY 0.001f
#define H 0.001f
#define DT 3.3333e-07f
#define OMEGAC 7.8540e+05f
#define TAO 4.0000e-06f
#define TT 8.1573e-06f
/**********FUNCTION DECLARATION**********/
//Host Functions
void Ultrasonic_Tomography(const string&, int, int, float, int);
void Position_Transducers(host_ptr<int>, host_ptr<int>, int);
float norm(host_ptr<float>, int, int);
//In-Line Functions
inline int grid_size(int, int);
template <typename T> __host__ __device__ void minmax(T &a, T &b);
//Device Functions
__global__ void propagation(kernel_ptr<int> const, kernel_ptr<int> const, kernel_ptr<float> const, kernel_ptr<float>, int, int, int, int, int);
__global__ void propagation_at_corners(kernel_ptr<float>, int, int, int);
__global__ void difference_signal(kernel_ptr<float> const, kernel_ptr<float> const, kernel_ptr<float> const, kernel_ptr<float> const, kernel_ptr<float> const, kernel_ptr<float>, kernel_ptr<float>, kernel_ptr<float>, kernel_ptr<float>, int, int, int);
__global__ void backpropagation1(kernel_ptr<float>, kernel_ptr<float> const, int, int, int, int);
__global__ void backpropagation2(kernel_ptr<float>, kernel_ptr<float> const, kernel_ptr<float> const, kernel_ptr<float> const, kernel_ptr<float> const, int, int, int, int);
__global__ void laplace(kernel_ptr<float> const, kernel_ptr<float>, int, int, int);
__global__ void laplace_corners(kernel_ptr<float> const, kernel_ptr<float>, int, int, int);
__global__ void update_differential(kernel_ptr<float>, kernel_ptr<float>, kernel_ptr<float> const, kernel_ptr<float> const, kernel_ptr<float> const, int, int, int);
__global__ void update_field(kernel_ptr<float>, kernel_ptr<float> const, kernel_ptr<float>, kernel_ptr<float> const, float, int);
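// The next three kernels reweight the per-group differentials:
// weights_differential1 accumulates, per column i and group g, the squared
// deviation of df(:, :, g) from the plain group mean; weights_differential2
// sets weights(g) = 1 / ||df_g - mean|| and sums them into total_weight;
// average_differential then rebuilds df_avg as the weight-normalized average.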
__global__ void weights_differential1(
kernel_ptr<float> norm,
kernel_ptr<float> const df,
kernel_ptr<float> const df_avg,
int Ng)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int g = threadIdx.z + blockIdx.z * blockDim.z;
if (i < NX && j < NY && g < Ng) {
float val = df(i, j, g) - (df_avg(i, j) / Ng);
atomicAdd(
&norm(i, g),
val * val);
}
}
__global__ void weights_differential2(
kernel_ptr<float> weights,
kernel_ptr<float> total_weight,
kernel_ptr<float> const norm,
int Ng)
{
int g = threadIdx.x + blockIdx.x * blockDim.x;
if (g < Ng) {
float sum = 0.f;
for (int i = 0; i < NX; ++i) {
sum += norm(i, g);
}
weights(g) = 1.f / sqrtf(sum);
atomicAdd(
&total_weight(0),
weights(g));
}
}
__global__ void average_differential(
kernel_ptr<float> df_avg,
kernel_ptr<float> const df,
kernel_ptr<float> const weights,
kernel_ptr<float> const total_weight,
int Ng)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int g = threadIdx.z + blockIdx.z * blockDim.z;
if (i < NX && j < NY && g < Ng) {
float weight = weights(g) / total_weight(0);
atomicAdd(
&df_avg(i, j),
df(i, j, g) * weight);
}
}
/***************MAIN PROGRAM***************/
int main(int argc, char **argv)
{
//Command Line Argument Processing
if (argc != 6) {
cerr << "Usage: " << argv[0] << " <fo_filename> <group size> <parallel groups> <target epsilon> <max iterations>\n\n";
exit(1);
}
string fo_filename = argv[1];
if (count(fo_filename.begin(), fo_filename.end(), '.') != 1) {
cerr << "Error: '" << fo_filename << "' should have only one period.\n"
<< " It should be in the current directory "
<< "and have only one filetype extension.\n\n";
exit(1);
}
int group_size = stoi(argv[2]);
int Np = stoi(argv[3]);
float target_epsilon = stof(argv[4]);
int max_iterations = stoi(argv[5]);
if (max_iterations == -1)
max_iterations = numeric_limits<int>::max();
cout << setprecision(9);
cerr << setprecision(9);
Ultrasonic_Tomography(fo_filename, group_size, Np, target_epsilon, max_iterations);
hipDeviceReset();
}
/**********HOST FUNCTION DEFINITIONS**********/
void Ultrasonic_Tomography(const string &fo_filename, int group_size, int Np, float target_epsilon, int max_iterations)
{
// fo(i, j) =
// ground truth value at pos (i, j) of field
host_ptr<float> fo(NX, NY);
device_ptr<float> dev_fo(NX, NY);
// Ng = number of sensor groups that will be launched in parallel
int Ng = NS / group_size;
// gg_xxx(i, k, g) =
// initial signal at pos i in row/column xxx
// at time k, from sensor group
// e.g g_bottom stores the bottom row,
// g_right stores the right column
device_ptr<float> dev_g_bottom(NX, NT, Ng);
device_ptr<float> dev_g_right(NY, NT, Ng);
device_ptr<float> dev_g_top(NX, NT, Ng);
device_ptr<float> dev_g_left(NY, NT, Ng);
host_ptr<float> g_bottom(NX, NT, Ng);
host_ptr<float> g_right(NY, NT, Ng);
host_ptr<float> g_top(NX, NT, Ng);
host_ptr<float> g_left(NY, NT, Ng);
auto idx = fo_filename.find('.');
string basename = fo_filename.substr(0, idx);
{
ifstream fo_in(fo_filename);
if (!fo_in) {
cerr << "Error: '" << fo_filename << "' file not found in current directory.\n\n";
return;
}
string prefix = basename + "-data-";
string suffix = "-" + to_string(group_size) + ".txt";
string gb_name = prefix + "bottom" + suffix;
string gr_name = prefix + "right" + suffix;
string gt_name = prefix + "top" + suffix;
string gl_name = prefix + "left" + suffix;
ifstream gb_in(gb_name);
ifstream gr_in(gr_name);
ifstream gt_in(gt_name);
ifstream gl_in(gl_name);
if (!gb_in) {
cerr << "Error: '" << gb_name << "' file not found in current directory.\n\n";
return;
}
if (!gr_in) {
cerr << "Error: '" << gr_name << "' file not found in current directory.\n\n";
return;
}
if (!gt_in) {
cerr << "Error: '" << gt_name << "' file not found in current directory.\n\n";
return;
}
if (!gl_in) {
cerr << "Error: '" << gl_name << "' file not found in current directory.\n\n";
return;
}
read(fo_in, fo);
copy(dev_fo, fo);
read(gb_in, g_bottom);
copy(dev_g_bottom, g_bottom);
read(gr_in, g_right);
copy(dev_g_right, g_right);
read(gt_in, g_top);
copy(dev_g_top, g_top);
read(gl_in, g_left);
copy(dev_g_left, g_left);
}
// Position of the transducers
host_ptr<int> ii(NS);
host_ptr<int> jj(NS);
device_ptr<int> dev_ii(NS);
device_ptr<int> dev_jj(NS);
Position_Transducers(ii, jj, NS);
// copy from host to device
copy(dev_ii, ii);
copy(dev_jj, jj);
// u(i, j, k, p) =
// wave propagation at pos (i, j) of field, at time k, for parallel group
// slot p (the sensor group is g = p + Np * step; likewise for z and Lu below)
device_ptr<float> dev_u(NX, NY, NT, Np);
// Kaczmarz method
// propagation
// rr_xxx(i, k, g) =
// difference signal between xxx sensors in u and gg_xxx
// at time k, from sensor group g
device_ptr<float> dev_rr_bottom(NX, NT, Ng);
device_ptr<float> dev_rr_right(NX, NT, Ng);
device_ptr<float> dev_rr_top(NX, NT, Ng);
device_ptr<float> dev_rr_left(NX, NT, Ng);
// z(i, j, k, g) =
// wave back propagation at pos (i, j) of field,
// at time k, from sensor group g
device_ptr<float> dev_z(NX, NY, NT+1, Np);
// Lu(i, j, k, g) =
// result of applying the Laplace operator to u(i, j, k, g)
device_ptr<float> dev_Lu(NX, NY, NT, Np);
// f(i, j) =
// current reconstruction of field at pos (i, j)
host_ptr<float> f(NX, NY);
device_ptr<float> dev_f(NX, NY);
// df(i, j, g) =
// discretized differential of f(i, j) from sensor group g
device_ptr<float> dev_df(NX, NY, Ng);
device_ptr<float> dev_df_avg(NX, NY);
device_ptr<float> dev_norm(NX, Ng);
device_ptr<float> dev_weights(Ng);
device_ptr<float> dev_total_weight(1);
// f_minus_fo(i, j)
// difference of field and ground truth at pos (i, j)
host_ptr<float> f_minus_fo(NX, NY);
device_ptr<float> dev_f_minus_fo(NX, NY);
// initialize epsilon values
float prev_epsilon = 100.f;
float curr_epsilon = -std::numeric_limits<float>::infinity();
float file_epsilon = std::numeric_limits<float>::infinity();
/* cerr << "writing convergence to 'sirt_convergence.txt'...\n" */
/* << "writing time to 'sirt_time.txt'...\n\n"; */
ofstream convergence_file("sirt_convergence.txt");
ofstream time_file("sirt_time.txt");
// kernel launch parameters for propagation
dim3 threads_prop(32, 1, 4);
dim3 grid_prop(
grid_size(NX, threads_prop.x),
grid_size(NY, threads_prop.y),
grid_size(Np, threads_prop.z));
// kernel launch parameters for propagation_at_corners
dim3 threads_prop_corners(32, 1);
dim3 grid_prop_corners(
grid_size(NT, threads_prop_corners.x),
grid_size(Np, threads_prop_corners.y));
// kernel launch parameters for difference_signal
dim3 threads_diff_signal(NX, 1, 1);
dim3 grid_diff_signal(
grid_size(NX, threads_diff_signal.x),
grid_size(NT, threads_diff_signal.y),
grid_size(Np, threads_diff_signal.z));
// kernel launch parameters for backpropagation1
dim3 threads_bp1(64, 2, 1);
dim3 grid_bp1(
grid_size(NX, threads_bp1.x),
grid_size(NY, threads_bp1.y),
grid_size(Np, threads_bp1.z));
// kernel launch parameters for backpropagation2
dim3 threads_bp2(32, 1);
dim3 grid_bp2(
grid_size(NX, threads_bp2.x),
grid_size(Np, threads_bp2.y));
// kernel launch parameters for laplace
dim3 threads_L(32, 2, 2);
dim3 grid_L(
grid_size(NX * NY, threads_L.x),
grid_size(NT, threads_L.y),
grid_size(Np, threads_L.z));
// kernel launch parameters for laplace_corners
dim3 threads_L_corners(96, 1, 1);
dim3 grid_L_corners(
grid_size(NT, threads_L_corners.x),
grid_size(Np, threads_L_corners.y));
// kernel launch parameters for update_differential
dim3 threads_diff(64, 2, 2);
dim3 grid_diff(
grid_size(NX * NY, threads_diff.x),
grid_size(NT, threads_diff.y),
grid_size(Np, threads_diff.z));
// kernel launch parameters for field kernels
dim3 threads_field(NX, 1);
dim3 grid_field(
grid_size(NX, threads_field.x),
grid_size(NY, threads_field.y));
dim3 threads_weights1(NX, 1, 1);
dim3 grid_weights1(
grid_size(NX, threads_weights1.x),
grid_size(NY, threads_weights1.y),
grid_size(Ng, threads_weights1.z));
dim3 threads_weights2(1);
dim3 grid_weights2(
grid_size(Ng, threads_weights2.x));
dim3 threads_avg_diff(NX, 1, 1);
dim3 grid_avg_diff(
grid_size(NX, threads_avg_diff.x),
grid_size(NY, threads_avg_diff.y),
grid_size(Ng, threads_avg_diff.z));
cerr << "group size: " << group_size << "\n"
<< "target epsilon: " << target_epsilon << "\n\n";
int w_iter = 6;
int w_eps = 12;
int w_diff = 15;
cout
<< setw(w_iter) << "iter" << " "
<< setw(w_eps) << "epsilon" << " "
<< setw(w_diff) << "difference" << " \n"
<< string(w_iter, '-') << " "
<< string(w_eps, '-') << " "
<< string(w_diff, '-') << " \n";
hipDeviceSynchronize();
int ti = clock();
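// Brute-force line search over the update step size: each iteration tries
// scales 0, 100, 200, ..., 1e6 and keeps the one minimizing the error norm.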
std::vector<float> test_scales{0.f};
for (float s = 100.f; s <= 1e6f; s += 100.f)
test_scales.push_back(s);
bool diverged = false;
bool reached = false;
int iter = 0;
float fo_norm = norm(fo, NX, NY);
while (!reached && !diverged && iter < max_iterations) {
++iter;
dev_df.set(0.f);
dev_df_avg.set(0.f);
dev_total_weight.set(0.f);
dev_norm.set(0.f);
int total_steps = ceil((float)Ng / Np);
for (int step = 0; step < total_steps; ++step) {
dev_u.set(0.f);
dev_z.set(0.f);
dev_Lu.set(0.f);
// propagate wave over field, store in u
for (int k = 1; k < NT - 1; ++k) {
hipLaunchKernelGGL(( propagation), dim3(grid_prop), dim3(threads_prop), 0, 0, dev_ii, dev_jj, dev_f, dev_u, k, group_size, step, Np, Ng);
}
hipLaunchKernelGGL(( propagation_at_corners), dim3(grid_prop_corners), dim3(threads_prop_corners), 0, 0, dev_u, step, Np, Ng);
// store difference signal of u at sensor positions and initial signal at g in rr
hipLaunchKernelGGL(( difference_signal), dim3(grid_diff_signal), dim3(threads_diff_signal), 0, 0, dev_u, dev_g_bottom, dev_g_right, dev_g_top, dev_g_left, dev_rr_bottom, dev_rr_right, dev_rr_top, dev_rr_left, step, Np, Ng);
// do back propagation of wave over field, store in z
for (int k = NT - 2; k > 0; k--) {
hipLaunchKernelGGL(( backpropagation1), dim3(grid_bp1), dim3(threads_bp1), 0, 0, dev_z, dev_f, k, step, Np, Ng);
hipLaunchKernelGGL(( backpropagation2), dim3(grid_bp2), dim3(threads_bp2), 0, 0, dev_z, dev_rr_bottom, dev_rr_right, dev_rr_top, dev_rr_left, k, step, Np, Ng);
}
// apply Laplace operator to u, store in Lu
hipLaunchKernelGGL(( laplace), dim3(grid_L), dim3(threads_L), 0, 0, dev_u, dev_Lu, step, Np, Ng);
hipLaunchKernelGGL(( laplace_corners), dim3(grid_L_corners), dim3(threads_L_corners), 0, 0, dev_u, dev_Lu, step, Np, Ng);
// update differential of f, store in df
hipLaunchKernelGGL(( update_differential), dim3(grid_diff), dim3(threads_diff), 0, 0, dev_df, dev_df_avg, dev_z, dev_Lu, dev_f, step, Np, Ng);
}
hipLaunchKernelGGL(( weights_differential1), dim3(grid_weights1), dim3(threads_weights1), 0, 0, dev_norm, dev_df, dev_df_avg, Ng);
dev_df_avg.set(0.f);
hipLaunchKernelGGL(( weights_differential2), dim3(grid_weights2), dim3(threads_weights2), 0, 0, dev_weights, dev_total_weight, dev_norm, Ng);
hipLaunchKernelGGL(( average_differential), dim3(grid_avg_diff), dim3(threads_avg_diff), 0, 0, dev_df_avg, dev_df, dev_weights, dev_total_weight, Ng);
device_ptr<float> test_f(NX, NY);
float scale{};
float min_epsilon = std::numeric_limits<float>::infinity();
/* cerr << "\n"; */
for (int i = 0; i < test_scales.size(); ++i) {
float test_scale = test_scales[i];
copy(test_f, dev_f);
hipLaunchKernelGGL(( update_field), dim3(grid_field), dim3(threads_field), 0, 0, test_f, dev_df_avg, dev_f_minus_fo, dev_fo, test_scale, Ng);
// copy from device to host
copy(f_minus_fo, dev_f_minus_fo);
float test_epsilon = norm(f_minus_fo, NX, NY) / fo_norm * 100.f;
/* cerr << test_epsilon << ", "; */
if (test_epsilon < min_epsilon) {
min_epsilon = test_epsilon;
scale = test_scale;
}
/* cerr << test_scale << " " << test_epsilon << "\n"; */
}
if (scale == 0.f) {
break;
}
// update f and f_minus_fo
hipLaunchKernelGGL(( update_field), dim3(grid_field), dim3(threads_field), 0, 0, dev_f, dev_df_avg, dev_f_minus_fo, dev_fo, scale, Ng);
// error calculation
// copy from device to host
copy(f_minus_fo, dev_f_minus_fo);
curr_epsilon = norm(f_minus_fo, NX, NY) / fo_norm * 100.f;
float current_t = (float)(clock()-ti) / CLOCKS_PER_SEC;
if (abs(file_epsilon - curr_epsilon) > 0.2f) {
convergence_file << curr_epsilon << " ";
time_file << current_t << " ";
file_epsilon = curr_epsilon;
}
cout << setw(w_iter) << iter << " "
<< setw(w_eps) << curr_epsilon << " "
<< setw(w_diff) << prev_epsilon - curr_epsilon << " "
<< scale << " \n";
// check ending conditions
reached = curr_epsilon <= target_epsilon;
diverged = curr_epsilon > prev_epsilon || std::isnan(curr_epsilon);
// update prev_epsilon
prev_epsilon = curr_epsilon;
}
if (reached) {
cerr << "reached target epsilon: " << target_epsilon << ", at iter: " << iter << ", epsilon: " << curr_epsilon << "\n\n";
}
else if (diverged) {
cerr << "diverged at iter: " << iter << ", epsilon: " << curr_epsilon << "\n\n";
}
else {
cerr << "stopped at iter: " << iter << ", epsilon: " << curr_epsilon << "\n\n";
}
hipDeviceSynchronize();
int tf = clock();
float elapsed_time = (float)(tf - ti) / CLOCKS_PER_SEC;
cout << endl;
cerr << "total time (s): " << elapsed_time << "\n"
<< "per iteration (s): " << elapsed_time / iter << "\n";
// copy from device to host
copy(f, dev_f);
string f_name = "sirt-norm-" + to_string(group_size) + "-" + basename + ".txt";
/* cerr << "writing to '" << f_name << "'...\n\n"; */
ofstream f_out(f_name);
write(f_out, f);
size_t free, total;
hipMemGetInfo(&free, &total);
cerr << "used mem: " << float(total - free) / (1024 * 1024) << " MB\n"
<< "free mem: " << float(free) / (1024 * 1024) << " MB\n"
<< "total mem: " << float(total) / (1024 * 1024) << " MB\n\n";
}
float norm(host_ptr<float> A, int nx, int ny)
{
float sum = 0;
for (int j = 0; j < ny; ++j)
for (int i = 0; i < nx; ++i)
sum += A(i, j) * A(i, j);
return sqrtf(sum);
}
void Position_Transducers(host_ptr<int> ii, host_ptr<int> jj, int num)
{
//fills ii and jj with the (x, y) grid coordinates of all `num` transducers,
//placed along the four sides of the sensor square
int p = 0;
for(p = 0; p < 160; p++)
{
ii(p) = 21 + (p + 1);
jj(p) = 181;
}
for(p = 160; p < 320; p++)
{
ii(p) = 181;
jj(p) = 181 - ((p + 1) - 160);
}
for(p = 320; p < 480; p++)
{
ii(p) = 181 - ((p + 1) - 320);
jj(p) = 21;
}
for(p = 480; p < num; p++)
{
ii(p) = 21;
jj(p) = 21 + ((p + 1) - 480);
}
}
/**********DEVICE FUNCTION DEFINITIONS***********/
__global__ void propagation(
kernel_ptr<int> const ii,
kernel_ptr<int> const jj,
kernel_ptr<float> const f,
kernel_ptr<float> u,
int k, int group_size,
int step, int Np, int Ng)
{
// Map from threadIdx / BlockIdx to pixel position
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int p = threadIdx.z + blockIdx.z * blockDim.z;
int g = p + Np * step;
if(i < NX && j < NY && p < Np && g < Ng) {
float v = 1500.f * sqrtf(1.f + f(i, j));
float r = v * DT / HX;
float s = 2.f - 4.f * r * r;
float val; // will hold new u at (i, j, k + 1)
// not at boundary
if (i != 0 && i != NX - 1 && j != 0 && j != NY - 1) {
val =
r * r *
(u(i+1, j, k, p) +
u(i-1, j, k, p) +
u(i, j-1, k, p) +
u(i, j+1, k, p)) +
s * u(i, j, k, p) -
u(i, j, k-1, p);
int sensor_idx = g * group_size;
int jp1 = jj(sensor_idx);
int jp2 = jj(sensor_idx + group_size - 1);
int ip1 = ii(sensor_idx);
int ip2 = ii(sensor_idx + group_size - 1);
minmax(jp1, jp2);
minmax(ip1, ip2);
// at sensor, k <= 24
if (j + 1 >= jp1 && j + 1 <= jp2 && i + 1 >= ip1 && i + 1 <= ip2 && k + 1 <= 24) {
float t = k * DT - TT;
// add wave value
val +=
v * v * DT * DT *
cosf(OMEGAC * t) *
expf(-(t * t) / (2.f * TAO * TAO));
}
}
// at boundary
else {
// boundary booleans
bool top = (j == 0);
bool bottom = (j == NY - 1);
bool left = (i == 0);
bool right = (i == NX - 1);
// index variables for different boundary cases
int ja = top ? (j + 1) : bottom ? (j - 1) : j;
int jb = top ? (j + 2) : bottom ? (j - 2) : j;
int ia = left ? (i + 1) : right ? (i - 1) : i;
int ib = left ? (i + 2) : right ? (i - 2) : i;
val =
(2.f - 2.f * r - r * r) * u(i, j, k, p) +
2.f * r * (1.f + r) * u(ia, ja, k, p) -
r * r * u(ib, jb, k, p) +
(2.f * r - 1.f) * u(i, j, k-1, p) -
2.f * r * u(ia, ja, k-1, p);
}
u(i, j, k+1, p) = val;
}
}
__global__ void propagation_at_corners(
kernel_ptr<float> u,
int step, int Np, int Ng)
{
int k = threadIdx.x + blockIdx.x * blockDim.x;
int p = threadIdx.y + blockIdx.y * blockDim.y;
int g = p + Np * step;
if (k < NT && g < Ng && p < Np) {
u(0, 0, k, p) =
1.f / 2.f * (u(0, 1, k, p) + u(1, 0, k, p));
u(NX-1, 0, k, p) =
1.f / 2.f * (u(NX-2, 0, k, p) + u(NX-1, 1, k, p));
u(0, NY-1, k, p) =
1.f / 2.f * (u(0, NY-2, k, p) + u(1, NY-1, k, p));
u(NX-1, NY-1, k, p) =
1.f / 2.f * (u(NX-2, NY-1, k, p) + u(NX-1, NY-2, k, p));
}
}
__global__ void difference_signal(
kernel_ptr<float> const u,
kernel_ptr<float> const g_bottom,
kernel_ptr<float> const g_right,
kernel_ptr<float> const g_top,
kernel_ptr<float> const g_left,
kernel_ptr<float> rr_bottom,
kernel_ptr<float> rr_right,
kernel_ptr<float> rr_top,
kernel_ptr<float> rr_left,
int step, int Np, int Ng)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int k = threadIdx.y + blockIdx.y * blockDim.y;
int p = threadIdx.z + blockIdx.z * blockDim.z;
int g = p + Np * step;
if (i > 20 && i < 180 && k > 1 && k < NT && g < Ng && p < Np) {
// store difference at time k of original signal
// and current signal at bottom sensor row
rr_bottom(i, k, g) = g_bottom(i, k, g) - u(i, 180, k, p);
// store difference at time k of original signal
// and current signal at top sensor row
rr_top(i, k, g) = g_top(i, k, g) - u(i, 20, k, p);
// store difference at time k of original signal
// and current signal at right sensor column
rr_right(i, k, g) = g_right(i, k, g) - u(180, i, k, p);
// store difference at time k of original signal
// and current signal at left sensor column
rr_left(i, k, g) = g_left(i, k, g) - u(20, i, k, p);
}
}
__global__ void backpropagation1(
kernel_ptr<float> z,
kernel_ptr<float> const f,
int k, int step, int Np, int Ng)
{
// Map from threadIdx / BlockIdx to pixel position
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int p = threadIdx.z + blockIdx.z * blockDim.z;
int g = p + Np * step;
if(i >= 1 && i < (NX - 1) && j >= 1 && j < (NY - 1) && g < Ng && p < Np)
{
z(i, j, k, p) =
1500.f * 1500.f * (DT * DT) *
((1.f + f(i, j-1)) * z(i, j-1, k+1, p) +
(1.f + f(i, j+1)) * z(i, j+1, k+1, p) +
(1.f + f(i-1, j)) * z(i-1, j, k+1, p) +
(1.f + f(i+1, j)) * z(i+1, j, k+1, p) -
4.f * (1.f + f(i, j)) *
z(i, j, k+1, p)) / (H * H) +
2.f * z(i, j, k+1, p) -
z(i, j, k+2, p);
}
}
__global__ void backpropagation2(
kernel_ptr<float> z,
kernel_ptr<float> const rr_bottom,
kernel_ptr<float> const rr_right,
kernel_ptr<float> const rr_top,
kernel_ptr<float> const rr_left,
int k, int step, int Np, int Ng)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int p = threadIdx.y + blockIdx.y * blockDim.y;
int g = p + Np * step;
if (g < Ng && p < Np) {
if(i >= 21 && i < 180) {
z(i, 180, k, p) =
z(i, 179, k, p) +
rr_bottom(i, k, g) * H * 1000.f;
z(i, 20, k, p) =
z(i, 21, k, p) +
rr_top(i, k, g) * H * 1000.f;
z(180, i, k, p) =
z(179, i, k, p) +
rr_right(i, k, g) * H * 1000.f;
z(20, i, k, p) =
z(21, i, k, p) +
rr_left(i, k, g) * H * 1000.f;
}
if (i >= 1 && i < (NX - 1)) {
z(i, 0, k, p) =
z(i, 1, k, p);
z(i, NY-1, k, p) =
z(i, NY-2, k, p);
z(0, i, k, p) =
z(1, i, k, p);
z(NX-1, i, k, p) =
z(NX-2, i, k, p);
}
else if (i == 0) {
z(0, 0, k, p) =
(z(1, 0, k, p) +
z(0, 1, k, p)) / 2.f;
z(NX-1, 0, k, p) =
(z(NX-2, 0, k, p) +
z(NX-1, 1, k, p)) / 2.f;
z(0, NY-1, k, p) =
(z(1, NY-1, k, p) +
z(0, NY-2, k, p)) / 2.f;
z(NX-1, NY-1, k, p) =
(z(NX-2, NY-1, k, p) +
z(NX-1, NY-2, k, p)) / 2.f;
}
}
}
__global__ void laplace(
kernel_ptr<float> const u,
kernel_ptr<float> Lu,
int step, int Np, int Ng)
{
// Map from threadIdx / BlockIdx to pixel position
int tx = threadIdx.x + blockIdx.x * blockDim.x;
int k = threadIdx.y + blockIdx.y * blockDim.y;
int p = threadIdx.z + blockIdx.z * blockDim.z;
int g = p + Np * step;
if (tx < (NX * NY) && (k + 1) < NT && g < Ng && p < Np) {
int i = tx % NX;
int j = tx / NX;
int ja = (j > 0) ? (j - 1) : j;
int jb = (j < NY - 1) ? (j + 1) : j;
int ia = (i > 0) ? (i - 1) : i;
int ib = (i < NX - 1) ? (i + 1) : i;
Lu(i, j, k+1, p) =
(u(i, ja, k+1, p) +
u(i, jb, k+1, p) +
u(ia, j, k+1, p) +
u(ib, j, k+1, p) -
4.f * u(i, j, k+1, p)) / (H * H);
}
}
__global__ void laplace_corners(
kernel_ptr<float> const u,
kernel_ptr<float> Lu,
int step, int Np, int Ng)
{
int k = threadIdx.x + blockIdx.x * blockDim.x;
int p = threadIdx.y + blockIdx.y * blockDim.y;
int g = p + Np * step;
if ((k + 1) < NT && g < Ng && p < Np) {
Lu(0, 0, k+1, p) =
(Lu(1, 0, k+1, p) +
Lu(0, 1, k+1, p)) / 2.f;
Lu(NX-1, 0, k+1, p) =
(Lu(NX-2, 0, k+1, p) +
Lu(NX-1, 1, k+1, p)) / 2.f;
Lu(0, NY-1, k+1, p) =
(Lu(1, NY-1, k+1, p) +
Lu(0, NY-2, k+1, p)) / 2.f;
Lu(NX-1, NY-1, k+1, p) =
(Lu(NX-2, NY-1, k+1, p) +
Lu(NX-1, NY-2, k+1, p)) / 2.f;
}
}
__global__ void update_differential(
kernel_ptr<float> df,
kernel_ptr<float> df_avg,
kernel_ptr<float> const z,
kernel_ptr<float> const Lu,
kernel_ptr<float> const f,
int step, int Np, int Ng)
{
// Map from threadIdx / BlockIdx to pixel position
int tx = threadIdx.x + blockIdx.x * blockDim.x;
int k = threadIdx.y + blockIdx.y * blockDim.y;
int p = threadIdx.z + blockIdx.z * blockDim.z;
int g = p + Np * step;
if (tx < (NX * NY) && (k + 1) < NT && g < Ng && p < Np) {
int i = tx % NX;
int j = tx / NX;
float val =
z(i, j, k+1, p) *
Lu(i, j, k+1, p) /
(1.f + f(i, j));
atomicAdd(&df(i, j, g), val);
atomicAdd(&df_avg(i, j), val);
}
}
__global__ void update_field(
kernel_ptr<float> f,
kernel_ptr<float> const df_avg,
kernel_ptr<float> f_minus_fo,
kernel_ptr<float> const fo,
float scale,
int Ng)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < NX && j < NY)
{
bool in_sensor_field = (i >= 21) && (i < 180) && (j >= 21) && (j < 180);
if (in_sensor_field)
f(i, j) += scale * df_avg(i, j);
f_minus_fo(i, j) = f(i, j) - fo(i, j);
}
}
/**********INLINE FUNCTION DEFINITIONS**********/
inline int grid_size(int n, int threads)
{
return ceil(float(n) / threads);
}
// POST-CONDITION: a <= b
template <typename T>
__host__ __device__
void minmax(T &a, T &b)
{
if (a > b) {
T t = a;
a = b;
b = t;
}
}
|
436ef203076916bbdf61f014674d1b89a26a15c5.cu
|
/**********HEADERS**********/
#include <vector>
#include <algorithm>
#include <iostream>
#include <iomanip>
#include <string>
#include <limits>
#include <stdlib.h>
#include <fstream>
#include <math.h>
#include <time.h>
#include "cuda_ptr.cuh"
#include "mimo-io.cuh"
using namespace std;
/**********DEFINING CONSTANTS***********/
#define NX 192 //was 201
#define NY 192 //was 201
#define NT 401
#define NS 640 //number of sensors
#define HX 0.001f
#define HY 0.001f
#define H 0.001f
#define DT 3.3333e-07f
#define OMEGAC 7.8540e+05f
#define TAO 4.0000e-06f
#define TT 8.1573e-06f
/**********FUNCTION DECLARATION**********/
//Host Functions
void Ultrasonic_Tomography(const string&, int, int, float, int);
void Position_Transducers(host_ptr<int>, host_ptr<int>, int);
float norm(host_ptr<float>, int, int);
//In-Line Functions
inline int grid_size(int, int);
template <typename T> __host__ __device__ void minmax(T &a, T &b);
//Device Functions
__global__ void propagation(kernel_ptr<int> const, kernel_ptr<int> const, kernel_ptr<float> const, kernel_ptr<float>, int, int, int, int, int);
__global__ void propagation_at_corners(kernel_ptr<float>, int, int, int);
__global__ void difference_signal(kernel_ptr<float> const, kernel_ptr<float> const, kernel_ptr<float> const, kernel_ptr<float> const, kernel_ptr<float> const, kernel_ptr<float>, kernel_ptr<float>, kernel_ptr<float>, kernel_ptr<float>, int, int, int);
__global__ void backpropagation1(kernel_ptr<float>, kernel_ptr<float> const, int, int, int, int);
__global__ void backpropagation2(kernel_ptr<float>, kernel_ptr<float> const, kernel_ptr<float> const, kernel_ptr<float> const, kernel_ptr<float> const, int, int, int, int);
__global__ void laplace(kernel_ptr<float> const, kernel_ptr<float>, int, int, int);
__global__ void laplace_corners(kernel_ptr<float> const, kernel_ptr<float>, int, int, int);
__global__ void update_differential(kernel_ptr<float>, kernel_ptr<float>, kernel_ptr<float> const, kernel_ptr<float> const, kernel_ptr<float> const, int, int, int);
__global__ void update_field(kernel_ptr<float>, kernel_ptr<float> const, kernel_ptr<float>, kernel_ptr<float> const, float, int);
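// The next three kernels reweight the per-group differentials:
// weights_differential1 accumulates, per column i and group g, the squared
// deviation of df(:, :, g) from the plain group mean; weights_differential2
// sets weights(g) = 1 / ||df_g - mean|| and sums them into total_weight;
// average_differential then rebuilds df_avg as the weight-normalized average.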
__global__ void weights_differential1(
kernel_ptr<float> norm,
kernel_ptr<float> const df,
kernel_ptr<float> const df_avg,
int Ng)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int g = threadIdx.z + blockIdx.z * blockDim.z;
if (i < NX && j < NY && g < Ng) {
float val = df(i, j, g) - (df_avg(i, j) / Ng);
atomicAdd(
&norm(i, g),
val * val);
}
}
__global__ void weights_differential2(
kernel_ptr<float> weights,
kernel_ptr<float> total_weight,
kernel_ptr<float> const norm,
int Ng)
{
int g = threadIdx.x + blockIdx.x * blockDim.x;
if (g < Ng) {
float sum = 0.f;
for (int i = 0; i < NX; ++i) {
sum += norm(i, g);
}
weights(g) = 1.f / sqrtf(sum);
atomicAdd(
&total_weight(0),
weights(g));
}
}
__global__ void average_differential(
kernel_ptr<float> df_avg,
kernel_ptr<float> const df,
kernel_ptr<float> const weights,
kernel_ptr<float> const total_weight,
int Ng)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int g = threadIdx.z + blockIdx.z * blockDim.z;
if (i < NX && j < NY && g < Ng) {
float weight = weights(g) / total_weight(0);
atomicAdd(
&df_avg(i, j),
df(i, j, g) * weight);
}
}
/***************MAIN PROGRAM***************/
int main(int argc, char **argv)
{
//Command Line Argument Processing
if (argc != 6) {
cerr << "Usage: " << argv[0] << " <fo_filename> <group size> <parallel groups> <target epsilon> <max iterations>\n\n";
exit(1);
}
string fo_filename = argv[1];
if (count(fo_filename.begin(), fo_filename.end(), '.') != 1) {
cerr << "Error: '" << fo_filename << "' should have only one period.\n"
<< " It should be in the current directory "
<< "and have only one filetype extension.\n\n";
exit(1);
}
int group_size = stoi(argv[2]);
int Np = stoi(argv[3]);
float target_epsilon = stof(argv[4]);
int max_iterations = stoi(argv[5]);
if (max_iterations == -1)
max_iterations = numeric_limits<int>::max();
cout << setprecision(9);
cerr << setprecision(9);
Ultrasonic_Tomography(fo_filename, group_size, Np, target_epsilon, max_iterations);
cudaDeviceReset();
}
/**********HOST FUNCTION DEFINITIONS**********/
void Ultrasonic_Tomography(const string &fo_filename, int group_size, int Np, float target_epsilon, int max_iterations)
{
// fo(i, j) =
// ground truth value at pos (i, j) of field
host_ptr<float> fo(NX, NY);
device_ptr<float> dev_fo(NX, NY);
// Ng = number of sensor groups that will be launched in parallel
int Ng = NS / group_size;
// gg_xxx(i, k, g) =
// initial signal at pos i in row/column xxx
// at time k, from sensor group
// e.g g_bottom stores the bottom row,
// g_right stores the right column
device_ptr<float> dev_g_bottom(NX, NT, Ng);
device_ptr<float> dev_g_right(NY, NT, Ng);
device_ptr<float> dev_g_top(NX, NT, Ng);
device_ptr<float> dev_g_left(NY, NT, Ng);
host_ptr<float> g_bottom(NX, NT, Ng);
host_ptr<float> g_right(NY, NT, Ng);
host_ptr<float> g_top(NX, NT, Ng);
host_ptr<float> g_left(NY, NT, Ng);
auto idx = fo_filename.find('.');
string basename = fo_filename.substr(0, idx);
{
ifstream fo_in(fo_filename);
if (!fo_in) {
cerr << "Error: '" << fo_filename << "' file not found in current directory.\n\n";
return;
}
string prefix = basename + "-data-";
string suffix = "-" + to_string(group_size) + ".txt";
string gb_name = prefix + "bottom" + suffix;
string gr_name = prefix + "right" + suffix;
string gt_name = prefix + "top" + suffix;
string gl_name = prefix + "left" + suffix;
ifstream gb_in(gb_name);
ifstream gr_in(gr_name);
ifstream gt_in(gt_name);
ifstream gl_in(gl_name);
if (!gb_in) {
cerr << "Error: '" << gb_name << "' file not found in current directory.\n\n";
return;
}
if (!gr_in) {
cerr << "Error: '" << gr_name << "' file not found in current directory.\n\n";
return;
}
if (!gt_in) {
cerr << "Error: '" << gt_name << "' file not found in current directory.\n\n";
return;
}
if (!gl_in) {
cerr << "Error: '" << gl_name << "' file not found in current directory.\n\n";
return;
}
read(fo_in, fo);
copy(dev_fo, fo);
read(gb_in, g_bottom);
copy(dev_g_bottom, g_bottom);
read(gr_in, g_right);
copy(dev_g_right, g_right);
read(gt_in, g_top);
copy(dev_g_top, g_top);
read(gl_in, g_left);
copy(dev_g_left, g_left);
}
// Position of the transducers
host_ptr<int> ii(NS);
host_ptr<int> jj(NS);
device_ptr<int> dev_ii(NS);
device_ptr<int> dev_jj(NS);
Position_Transducers(ii, jj, NS);
// copy from host to device
copy(dev_ii, ii);
copy(dev_jj, jj);
// u(i, j, k, p) =
// wave propagation at pos (i, j) of field, at time k, for parallel group
// slot p (the sensor group is g = p + Np * step; likewise for z and Lu below)
device_ptr<float> dev_u(NX, NY, NT, Np);
// Kaczmarz method
// propagation
// rr_xxx(i, k, g) =
// difference signal between xxx sensors in u and gg_xxx
// at time k, from sensor group g
device_ptr<float> dev_rr_bottom(NX, NT, Ng);
device_ptr<float> dev_rr_right(NX, NT, Ng);
device_ptr<float> dev_rr_top(NX, NT, Ng);
device_ptr<float> dev_rr_left(NX, NT, Ng);
// z(i, j, k, g) =
// wave back propagation at pos (i, j) of field,
// at time k, from sensor group g
device_ptr<float> dev_z(NX, NY, NT+1, Np);
// Lu(i, j, k, g) =
// result of applying the Laplace operator to u(i, j, k, g)
device_ptr<float> dev_Lu(NX, NY, NT, Np);
// f(i, j) =
// current reconstruction of field at pos (i, j)
host_ptr<float> f(NX, NY);
device_ptr<float> dev_f(NX, NY);
// df(i, j, g) =
// discretized differential of f(i, j) from sensor group g
device_ptr<float> dev_df(NX, NY, Ng);
device_ptr<float> dev_df_avg(NX, NY);
device_ptr<float> dev_norm(NX, Ng);
device_ptr<float> dev_weights(Ng);
device_ptr<float> dev_total_weight(1);
// f_minus_fo(i, j)
// difference of field and ground truth at pos (i, j)
host_ptr<float> f_minus_fo(NX, NY);
device_ptr<float> dev_f_minus_fo(NX, NY);
// initialize epsilon values
float prev_epsilon = 100.f;
float curr_epsilon = -std::numeric_limits<float>::infinity();
float file_epsilon = std::numeric_limits<float>::infinity();
/* cerr << "writing convergence to 'sirt_convergence.txt'...\n" */
/* << "writing time to 'sirt_time.txt'...\n\n"; */
ofstream convergence_file("sirt_convergence.txt");
ofstream time_file("sirt_time.txt");
// kernel launch parameters for propagation
dim3 threads_prop(32, 1, 4);
dim3 grid_prop(
grid_size(NX, threads_prop.x),
grid_size(NY, threads_prop.y),
grid_size(Np, threads_prop.z));
// kernel launch parameters for propagation_at_corners
dim3 threads_prop_corners(32, 1);
dim3 grid_prop_corners(
grid_size(NT, threads_prop_corners.x),
grid_size(Np, threads_prop_corners.y));
// kernel launch parameters for difference_signal
dim3 threads_diff_signal(NX, 1, 1);
dim3 grid_diff_signal(
grid_size(NX, threads_diff_signal.x),
grid_size(NT, threads_diff_signal.y),
grid_size(Np, threads_diff_signal.z));
// kernel launch parameters for backpropagation1
dim3 threads_bp1(64, 2, 1);
dim3 grid_bp1(
grid_size(NX, threads_bp1.x),
grid_size(NY, threads_bp1.y),
grid_size(Np, threads_bp1.z));
// kernel launch parameters for backpropagation2
dim3 threads_bp2(32, 1);
dim3 grid_bp2(
grid_size(NX, threads_bp2.x),
grid_size(Np, threads_bp2.y));
// kernel launch parameters for laplace
dim3 threads_L(32, 2, 2);
dim3 grid_L(
grid_size(NX * NY, threads_L.x),
grid_size(NT, threads_L.y),
grid_size(Np, threads_L.z));
// kernel launch parameters for laplace_corners
dim3 threads_L_corners(96, 1, 1);
dim3 grid_L_corners(
grid_size(NT, threads_L_corners.x),
grid_size(Np, threads_L_corners.y));
// kernel launch parameters for update_differential
dim3 threads_diff(64, 2, 2);
dim3 grid_diff(
grid_size(NX * NY, threads_diff.x),
grid_size(NT, threads_diff.y),
grid_size(Np, threads_diff.z));
// kernel launch parameters for field kernels
dim3 threads_field(NX, 1);
dim3 grid_field(
grid_size(NX, threads_field.x),
grid_size(NY, threads_field.y));
dim3 threads_weights1(NX, 1, 1);
dim3 grid_weights1(
grid_size(NX, threads_weights1.x),
grid_size(NY, threads_weights1.y),
grid_size(Ng, threads_weights1.z));
dim3 threads_weights2(1);
dim3 grid_weights2(
grid_size(Ng, threads_weights2.x));
dim3 threads_avg_diff(NX, 1, 1);
dim3 grid_avg_diff(
grid_size(NX, threads_avg_diff.x),
grid_size(NY, threads_avg_diff.y),
grid_size(Ng, threads_avg_diff.z));
cerr << "group size: " << group_size << "\n"
<< "target epsilon: " << target_epsilon << "\n\n";
int w_iter = 6;
int w_eps = 12;
int w_diff = 15;
cout
<< setw(w_iter) << "iter" << " "
<< setw(w_eps) << "epsilon" << " "
<< setw(w_diff) << "difference" << " \n"
<< string(w_iter, '-') << " "
<< string(w_eps, '-') << " "
<< string(w_diff, '-') << " \n";
cudaDeviceSynchronize();
int ti = clock();
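// Brute-force line search over the update step size: each iteration tries
// scales 0, 100, 200, ..., 1e6 and keeps the one minimizing the error norm.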
std::vector<float> test_scales{0.f};
for (float s = 100.f; s <= 1e6f; s += 100.f)
test_scales.push_back(s);
bool diverged = false;
bool reached = false;
int iter = 0;
float fo_norm = norm(fo, NX, NY);
while (!reached && !diverged && iter < max_iterations) {
++iter;
dev_df.set(0.f);
dev_df_avg.set(0.f);
dev_total_weight.set(0.f);
dev_norm.set(0.f);
int total_steps = ceil((float)Ng / Np);
for (int step = 0; step < total_steps; ++step) {
dev_u.set(0.f);
dev_z.set(0.f);
dev_Lu.set(0.f);
// propagate wave over field, store in u
for (int k = 1; k < NT - 1; ++k) {
propagation<<<grid_prop, threads_prop>>>(dev_ii, dev_jj, dev_f, dev_u, k, group_size, step, Np, Ng);
}
propagation_at_corners<<<grid_prop_corners, threads_prop_corners>>>(dev_u, step, Np, Ng);
// store difference signal of u at sensor positions and initial signal at g in rr
difference_signal<<<grid_diff_signal, threads_diff_signal>>>(dev_u, dev_g_bottom, dev_g_right, dev_g_top, dev_g_left, dev_rr_bottom, dev_rr_right, dev_rr_top, dev_rr_left, step, Np, Ng);
// do back propagation of wave over field, store in z
for (int k = NT - 2; k > 0; k--) {
backpropagation1<<<grid_bp1, threads_bp1>>>(dev_z, dev_f, k, step, Np, Ng);
backpropagation2<<<grid_bp2, threads_bp2>>>(dev_z, dev_rr_bottom, dev_rr_right, dev_rr_top, dev_rr_left, k, step, Np, Ng);
}
// apply Laplace operator to u, store in Lu
laplace<<<grid_L, threads_L>>>(dev_u, dev_Lu, step, Np, Ng);
laplace_corners<<<grid_L_corners, threads_L_corners>>>(dev_u, dev_Lu, step, Np, Ng);
// update differential of f, store in df
update_differential<<<grid_diff, threads_diff>>>(dev_df, dev_df_avg, dev_z, dev_Lu, dev_f, step, Np, Ng);
}
weights_differential1<<<grid_weights1, threads_weights1>>>(dev_norm, dev_df, dev_df_avg, Ng);
dev_df_avg.set(0.f);
weights_differential2<<<grid_weights2, threads_weights2>>>(dev_weights, dev_total_weight, dev_norm, Ng);
average_differential<<<grid_avg_diff, threads_avg_diff>>>(dev_df_avg, dev_df, dev_weights, dev_total_weight, Ng);
device_ptr<float> test_f(NX, NY);
float scale{};
float min_epsilon = std::numeric_limits<float>::infinity();
/* cerr << "\n"; */
for (int i = 0; i < test_scales.size(); ++i) {
float test_scale = test_scales[i];
copy(test_f, dev_f);
update_field<<<grid_field, threads_field>>>(test_f, dev_df_avg, dev_f_minus_fo, dev_fo, test_scale, Ng);
// copy from device to host
copy(f_minus_fo, dev_f_minus_fo);
float test_epsilon = norm(f_minus_fo, NX, NY) / fo_norm * 100.f;
/* cerr << test_epsilon << ", "; */
if (test_epsilon < min_epsilon) {
min_epsilon = test_epsilon;
scale = test_scale;
}
/* cerr << test_scale << " " << test_epsilon << "\n"; */
}
if (scale == 0.f) {
break;
}
// update f and f_minus_fo
update_field<<<grid_field, threads_field>>>(dev_f, dev_df_avg, dev_f_minus_fo, dev_fo, scale, Ng);
// error calculation
// copy from device to host
copy(f_minus_fo, dev_f_minus_fo);
curr_epsilon = norm(f_minus_fo, NX, NY) / fo_norm * 100.f;
float current_t = (float)(clock()-ti) / CLOCKS_PER_SEC;
if (abs(file_epsilon - curr_epsilon) > 0.2f) {
convergence_file << curr_epsilon << " ";
time_file << current_t << " ";
file_epsilon = curr_epsilon;
}
cout << setw(w_iter) << iter << " "
<< setw(w_eps) << curr_epsilon << " "
<< setw(w_diff) << prev_epsilon - curr_epsilon << " "
<< scale << " \n";
// check ending conditions
reached = curr_epsilon <= target_epsilon;
diverged = curr_epsilon > prev_epsilon || std::isnan(curr_epsilon);
// update prev_epsilon
prev_epsilon = curr_epsilon;
}
if (reached) {
cerr << "reached target epsilon: " << target_epsilon << ", at iter: " << iter << ", epsilon: " << curr_epsilon << "\n\n";
}
else if (diverged) {
cerr << "diverged at iter: " << iter << ", epsilon: " << curr_epsilon << "\n\n";
}
else {
cerr << "stopped at iter: " << iter << ", epsilon: " << curr_epsilon << "\n\n";
}
cudaDeviceSynchronize();
int tf = clock();
float elapsed_time = (float)(tf - ti) / CLOCKS_PER_SEC;
cout << endl;
cerr << "total time (s): " << elapsed_time << "\n"
<< "per iteration (s): " << elapsed_time / iter << "\n";
// copy from device to host
copy(f, dev_f);
string f_name = "sirt-norm-" + to_string(group_size) + "-" + basename + ".txt";
/* cerr << "writing to '" << f_name << "'...\n\n"; */
ofstream f_out(f_name);
write(f_out, f);
size_t free, total;
cudaMemGetInfo(&free, &total);
cerr << "used mem: " << float(total - free) / (1024 * 1024) << " MB\n"
<< "free mem: " << float(free) / (1024 * 1024) << " MB\n"
<< "total mem: " << float(total) / (1024 * 1024) << " MB\n\n";
}
float norm(host_ptr<float> A, int nx, int ny)
{
float sum = 0;
for (int j = 0; j < ny; ++j)
for (int i = 0; i < nx; ++i)
sum += A(i, j) * A(i, j);
return sqrtf(sum);
}
void Position_Transducers(host_ptr<int> ii, host_ptr<int> jj, int num)
{
//fills ii and jj with the (x, y) grid coordinates of all `num` transducers,
//placed along the four sides of the sensor square
int p = 0;
for(p = 0; p < 160; p++)
{
ii(p) = 21 + (p + 1);
jj(p) = 181;
}
for(p = 160; p < 320; p++)
{
ii(p) = 181;
jj(p) = 181 - ((p + 1) - 160);
}
for(p = 320; p < 480; p++)
{
ii(p) = 181 - ((p + 1) - 320);
jj(p) = 21;
}
for(p = 480; p < num; p++)
{
ii(p) = 21;
jj(p) = 21 + ((p + 1) - 480);
}
}
/**********DEVICE FUNCTION DEFINITIONS***********/
__global__ void propagation(
kernel_ptr<int> const ii,
kernel_ptr<int> const jj,
kernel_ptr<float> const f,
kernel_ptr<float> u,
int k, int group_size,
int step, int Np, int Ng)
{
// Map from threadIdx / BlockIdx to pixel position
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int p = threadIdx.z + blockIdx.z * blockDim.z;
int g = p + Np * step;
if(i < NX && j < NY && p < Np && g < Ng) {
float v = 1500.f * sqrtf(1.f + f(i, j));
float r = v * DT / HX;
float s = 2.f - 4.f * r * r;
float val; // will hold new u at (i, j, k + 1)
// not at boundary
if (i != 0 && i != NX - 1 && j != 0 && j != NY - 1) {
val =
r * r *
(u(i+1, j, k, p) +
u(i-1, j, k, p) +
u(i, j-1, k, p) +
u(i, j+1, k, p)) +
s * u(i, j, k, p) -
u(i, j, k-1, p);
int sensor_idx = g * group_size;
int jp1 = jj(sensor_idx);
int jp2 = jj(sensor_idx + group_size - 1);
int ip1 = ii(sensor_idx);
int ip2 = ii(sensor_idx + group_size - 1);
minmax(jp1, jp2);
minmax(ip1, ip2);
// at sensor, k <= 24
if (j + 1 >= jp1 && j + 1 <= jp2 && i + 1 >= ip1 && i + 1 <= ip2 && k + 1 <= 24) {
float t = k * DT - TT;
// add wave value
val +=
v * v * DT * DT *
cosf(OMEGAC * t) *
expf(-(t * t) / (2.f * TAO * TAO));
}
}
// at boundary
else {
// boundary booleans
bool top = (j == 0);
bool bottom = (j == NY - 1);
bool left = (i == 0);
bool right = (i == NX - 1);
// index variables for different boundary cases
int ja = top ? (j + 1) : bottom ? (j - 1) : j;
int jb = top ? (j + 2) : bottom ? (j - 2) : j;
int ia = left ? (i + 1) : right ? (i - 1) : i;
int ib = left ? (i + 2) : right ? (i - 2) : i;
val =
(2.f - 2.f * r - r * r) * u(i, j, k, p) +
2.f * r * (1.f + r) * u(ia, ja, k, p) -
r * r * u(ib, jb, k, p) +
(2.f * r - 1.f) * u(i, j, k-1, p) -
2.f * r * u(ia, ja, k-1, p);
}
u(i, j, k+1, p) = val;
}
}
__global__ void propagation_at_corners(
kernel_ptr<float> u,
int step, int Np, int Ng)
{
int k = threadIdx.x + blockIdx.x * blockDim.x;
int p = threadIdx.y + blockIdx.y * blockDim.y;
int g = p + Np * step;
if (k < NT && g < Ng && p < Np) {
u(0, 0, k, p) =
1.f / 2.f * (u(0, 1, k, p) + u(1, 0, k, p));
u(NX-1, 0, k, p) =
1.f / 2.f * (u(NX-2, 0, k, p) + u(NX-1, 1, k, p));
u(0, NY-1, k, p) =
1.f / 2.f * (u(0, NY-2, k, p) + u(1, NY-1, k, p));
u(NX-1, NY-1, k, p) =
1.f / 2.f * (u(NX-2, NY-1, k, p) + u(NX-1, NY-2, k, p));
}
}
__global__ void difference_signal(
kernel_ptr<float> const u,
kernel_ptr<float> const g_bottom,
kernel_ptr<float> const g_right,
kernel_ptr<float> const g_top,
kernel_ptr<float> const g_left,
kernel_ptr<float> rr_bottom,
kernel_ptr<float> rr_right,
kernel_ptr<float> rr_top,
kernel_ptr<float> rr_left,
int step, int Np, int Ng)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int k = threadIdx.y + blockIdx.y * blockDim.y;
int p = threadIdx.z + blockIdx.z * blockDim.z;
int g = p + Np * step;
if (i > 20 && i < 180 && k > 1 && k < NT && g < Ng && p < Np) {
// store difference at time k of original signal
// and current signal at bottom sensor row
rr_bottom(i, k, g) = g_bottom(i, k, g) - u(i, 180, k, p);
// store difference at time k of original signal
// and current signal at top sensor row
rr_top(i, k, g) = g_top(i, k, g) - u(i, 20, k, p);
// store difference at time k of original signal
// and current signal at right sensor column
rr_right(i, k, g) = g_right(i, k, g) - u(180, i, k, p);
// store difference at time k of original signal
// and current signal at left sensor column
rr_left(i, k, g) = g_left(i, k, g) - u(20, i, k, p);
}
}
__global__ void backpropagation1(
kernel_ptr<float> z,
kernel_ptr<float> const f,
int k, int step, int Np, int Ng)
{
// Map from threadIdx / BlockIdx to pixel position
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int p = threadIdx.z + blockIdx.z * blockDim.z;
int g = p + Np * step;
if(i >= 1 && i < (NX - 1) && j >= 1 && j < (NY - 1) && g < Ng && p < Np)
{
z(i, j, k, p) =
1500.f * 1500.f * (DT * DT) *
((1.f + f(i, j-1)) * z(i, j-1, k+1, p) +
(1.f + f(i, j+1)) * z(i, j+1, k+1, p) +
(1.f + f(i-1, j)) * z(i-1, j, k+1, p) +
(1.f + f(i+1, j)) * z(i+1, j, k+1, p) -
4.f * (1.f + f(i, j)) *
z(i, j, k+1, p)) / (H * H) +
2.f * z(i, j, k+1, p) -
z(i, j, k+2, p);
}
}
__global__ void backpropagation2(
kernel_ptr<float> z,
kernel_ptr<float> const rr_bottom,
kernel_ptr<float> const rr_right,
kernel_ptr<float> const rr_top,
kernel_ptr<float> const rr_left,
int k, int step, int Np, int Ng)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int p = threadIdx.y + blockIdx.y * blockDim.y;
int g = p + Np * step;
if (g < Ng && p < Np) {
if(i >= 21 && i < 180) {
z(i, 180, k, p) =
z(i, 179, k, p) +
rr_bottom(i, k, g) * H * 1000.f;
z(i, 20, k, p) =
z(i, 21, k, p) +
rr_top(i, k, g) * H * 1000.f;
z(180, i, k, p) =
z(179, i, k, p) +
rr_right(i, k, g) * H * 1000.f;
z(20, i, k, p) =
z(21, i, k, p) +
rr_left(i, k, g) * H * 1000.f;
}
if (i >= 1 && i < (NX - 1)) {
z(i, 0, k, p) =
z(i, 1, k, p);
z(i, NY-1, k, p) =
z(i, NY-2, k, p);
z(0, i, k, p) =
z(1, i, k, p);
z(NX-1, i, k, p) =
z(NX-2, i, k, p);
}
else if (i == 0) {
z(0, 0, k, p) =
(z(1, 0, k, p) +
z(0, 1, k, p)) / 2.f;
z(NX-1, 0, k, p) =
(z(NX-2, 0, k, p) +
z(NX-1, 1, k, p)) / 2.f;
z(0, NY-1, k, p) =
(z(1, NY-1, k, p) +
z(0, NY-2, k, p)) / 2.f;
z(NX-1, NY-1, k, p) =
(z(NX-2, NY-1, k, p) +
z(NX-1, NY-2, k, p)) / 2.f;
}
}
}
__global__ void laplace(
kernel_ptr<float> const u,
kernel_ptr<float> Lu,
int step, int Np, int Ng)
{
// Map from threadIdx / BlockIdx to pixel position
int tx = threadIdx.x + blockIdx.x * blockDim.x;
int k = threadIdx.y + blockIdx.y * blockDim.y;
int p = threadIdx.z + blockIdx.z * blockDim.z;
int g = p + Np * step;
if (tx < (NX * NY) && (k + 1) < NT && g < Ng && p < Np) {
int i = tx % NX;
int j = tx / NX;
int ja = (j > 0) ? (j - 1) : j;
int jb = (j < NY - 1) ? (j + 1) : j;
int ia = (i > 0) ? (i - 1) : i;
int ib = (i < NX - 1) ? (i + 1) : i;
Lu(i, j, k+1, p) =
(u(i, ja, k+1, p) +
u(i, jb, k+1, p) +
u(ia, j, k+1, p) +
u(ib, j, k+1, p) -
4.f * u(i, j, k+1, p)) / (H * H);
}
}
__global__ void laplace_corners(
kernel_ptr<float> const u,
kernel_ptr<float> Lu,
int step, int Np, int Ng)
{
int k = threadIdx.x + blockIdx.x * blockDim.x;
int p = threadIdx.y + blockIdx.y * blockDim.y;
int g = p + Np * step;
if ((k + 1) < NT && g < Ng && p < Np) {
Lu(0, 0, k+1, p) =
(Lu(1, 0, k+1, p) +
Lu(0, 1, k+1, p)) / 2.f;
Lu(NX-1, 0, k+1, p) =
(Lu(NX-2, 0, k+1, p) +
Lu(NX-1, 1, k+1, p)) / 2.f;
Lu(0, NY-1, k+1, p) =
(Lu(1, NY-1, k+1, p) +
Lu(0, NY-2, k+1, p)) / 2.f;
Lu(NX-1, NY-1, k+1, p) =
(Lu(NX-2, NY-1, k+1, p) +
Lu(NX-1, NY-2, k+1, p)) / 2.f;
}
}
__global__ void update_differential(
kernel_ptr<float> df,
kernel_ptr<float> df_avg,
kernel_ptr<float> const z,
kernel_ptr<float> const Lu,
kernel_ptr<float> const f,
int step, int Np, int Ng)
{
// Map from threadIdx / BlockIdx to pixel position
int tx = threadIdx.x + blockIdx.x * blockDim.x;
int k = threadIdx.y + blockIdx.y * blockDim.y;
int p = threadIdx.z + blockIdx.z * blockDim.z;
int g = p + Np * step;
if (tx < (NX * NY) && (k + 1) < NT && g < Ng && p < Np) {
int i = tx % NX;
int j = tx / NX;
float val =
z(i, j, k+1, p) *
Lu(i, j, k+1, p) /
(1.f + f(i, j));
atomicAdd(&df(i, j, g), val);
atomicAdd(&df_avg(i, j), val);
}
}
__global__ void update_field(
kernel_ptr<float> f,
kernel_ptr<float> const df_avg,
kernel_ptr<float> f_minus_fo,
kernel_ptr<float> const fo,
float scale,
int Ng)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < NX && j < NY)
{
bool in_sensor_field = (i >= 21) && (i < 180) && (j >= 21) && (j < 180);
if (in_sensor_field)
f(i, j) += scale * df_avg(i, j);
f_minus_fo(i, j) = f(i, j) - fo(i, j);
}
}
/**********INLINE FUNCTION DEFINITIONS**********/
inline int grid_size(int n, int threads)
{
return ceil(float(n) / threads);
}
// POST-CONDITION: a <= b
template <typename T>
__host__ __device__
void minmax(T &a, T &b)
{
if (a > b) {
T t = a;
a = b;
b = t;
}
}
|
552b51044bc02aa83e13d76ca79f90574d4b12cd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hor.cuh"
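/* GPU Horspool: each thread scans its own stride_length-wide slice of the
 * text; after each window comparison it shifts by hbc[last window character],
 * the bad-character rule precomputed in pre_horspool below. */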
__global__ void horspool(char *text, unsigned long text_size, char *pattern,
int pattern_size, unsigned char hbc[], int stride_length, int *match) {
int i;
unsigned long thread_id = blockDim.x * blockIdx.x + threadIdx.x;
unsigned long start_inx = thread_id * stride_length;
unsigned long boundary = start_inx + stride_length;
unsigned long j = start_inx;
/*
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int start_index = tid * stride_length;
int end_index = (tid + 1) * stride_length + pattern_size - 1;
*/
// match[tid] = 0;
while(j < boundary && j <= text_size - pattern_size) {
i = 0;
while(i < pattern_size && pattern[i] == text[j + i]) i++;
if(i == pattern_size) match[j] = 1;
j += hbc[(unsigned char)text[j + pattern_size - 1]]; /* cast avoids a negative index when char is signed */
}
}
void pre_horspool(char *pattern, int pattern_size, unsigned char hbc[]){
int i;
for(i = 0; i < SIGMA; i++) hbc[i] = pattern_size;
for(i = 0; i < pattern_size - 1; i++) hbc[(unsigned char)pattern[i]] = pattern_size - i - 1;
}
|
552b51044bc02aa83e13d76ca79f90574d4b12cd.cu
|
#include "hor.cuh"
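/* GPU Horspool: each thread scans its own stride_length-wide slice of the
 * text; after each window comparison it shifts by hbc[last window character],
 * the bad-character rule precomputed in pre_horspool below. */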
__global__ void horspool(char *text, unsigned long text_size, char *pattern,
int pattern_size, unsigned char hbc[], int stride_length, int *match) {
int i;
unsigned long thread_id = blockDim.x * blockIdx.x + threadIdx.x;
unsigned long start_inx = thread_id * stride_length;
unsigned long boundary = start_inx + stride_length;
unsigned long j = start_inx;
/*
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int start_index = tid * stride_length;
int end_index = (tid + 1) * stride_length + pattern_size - 1;
*/
// match[tid] = 0;
while(j < boundary && j <= text_size - pattern_size) {
i = 0;
while(i < pattern_size && pattern[i] == text[j + i]) i++;
if(i == pattern_size) match[j] = 1;
j += hbc[(unsigned char)text[j + pattern_size - 1]]; /* cast avoids a negative index when char is signed */
}
}
void pre_horspool(char *pattern, int pattern_size, unsigned char hbc[]){
int i;
for(i = 0; i < SIGMA; i++) hbc[i] = pattern_size;
for(i = 0; i < pattern_size - 1; i++) hbc[(unsigned char)pattern[i]] = pattern_size - i - 1;
}
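/* Hedged usage sketch (not part of the original source): one possible host
 * driver for the kernel above. SIGMA is assumed to be the alphabet size
 * (typically 256) declared in hor.cuh; stride_length and the block size are
 * illustrative choices, not values taken from this repository. */
void run_horspool(char *h_text, unsigned long n, char *h_pat, int m, int *h_match)
{
    unsigned char hbc[SIGMA];
    pre_horspool(h_pat, m, hbc);        /* build bad-character table on host */
    char *d_text, *d_pat;
    unsigned char *d_hbc;
    int *d_match;
    cudaMalloc((void **)&d_text, n);
    cudaMalloc((void **)&d_pat, m);
    cudaMalloc((void **)&d_hbc, SIGMA);
    cudaMalloc((void **)&d_match, n * sizeof(int));
    cudaMemcpy(d_text, h_text, n, cudaMemcpyHostToDevice);
    cudaMemcpy(d_pat, h_pat, m, cudaMemcpyHostToDevice);
    cudaMemcpy(d_hbc, hbc, SIGMA, cudaMemcpyHostToDevice);
    cudaMemset(d_match, 0, n * sizeof(int));
    int stride_length = 1024;           /* text chars per thread (assumed) */
    unsigned long total = (n + stride_length - 1) / stride_length;
    int block = 256;
    int grid = (int)((total + block - 1) / block);
    horspool<<<grid, block>>>(d_text, n, d_pat, m, d_hbc, stride_length, d_match);
    cudaMemcpy(h_match, d_match, n * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(d_text); cudaFree(d_pat); cudaFree(d_hbc); cudaFree(d_match);
}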
|
25db66cf06936880307dbb718be467bfe8f732c9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Host code for stream compaction.
 * The input stream is filtered to remove all values <= 0.
 * The output stream contains only positive values (> 0).
*
* Author: Naga Kandasamy
* Date created: May 12, 2019
* Date modified: May 30, 2020
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include <sys/time.h>
/* Uncomment the line below if you want debug information */
// #define DEBUG
#define NUM_ELEMENTS 1024
#define MIN_NUMBER -10
#define MAX_NUMBER 10
/* Include kernel */
#include "compact_kernel.hip"
void run_test(int);
extern "C" void compute_gold(int *, int *, int *);
void check_CUDA_error(const char *);
int check_results(int *, int *, int);
void print_elements(int *, int);
void print_scanned_elements(int *, int);
int rand_int(int, int);
int main(int argc, char **argv)
{
int num_elements = NUM_ELEMENTS;
run_test(num_elements);
exit(EXIT_SUCCESS);
}
void run_test(int num_elements)
{
/* Memory on host to store input data */
int mem_size = sizeof(int) * num_elements;
int *h_data = (int *)malloc(mem_size);
/* Initialize input data to random integer values in [MIN_NUMBER, MAX_NUMBER] */
printf("\nGenerating input stream of %d elements\n", num_elements);
srand(time (NULL));
int i;
for (i = 0; i < num_elements; ++i)
h_data[i] = rand_int(MIN_NUMBER, MAX_NUMBER);
#ifdef DEBUG
printf("\nOriginal stream\n");
print_elements(h_data, num_elements);
#endif
/* Compute reference solution */
printf("\nCompacting stream on CPU\n");
int *reference = (int *)malloc(mem_size);
int h_new_n = num_elements;
compute_gold(reference, h_data, &h_new_n);
#ifdef DEBUG
print_elements(reference, h_new_n);
#endif
printf("Number of elements in compacted stream = %d\n", h_new_n);
/* Allocate memory on device for input and output arrays */
printf("\nCompacting stream on GPU\n");
int *d_in, *d_out;
hipMalloc((void **)&d_in, mem_size);
hipMalloc((void **)&d_out, mem_size);
/* Copy input array to device */
hipMemcpy(d_in, h_data, mem_size, hipMemcpyHostToDevice);
/* Allocate memory on host and on device for the scanned flag array */
int *h_flag, *d_flag;
h_flag = (int *)malloc(num_elements * sizeof(int));
hipMalloc((void **)&d_flag, num_elements * sizeof(int));
/* Allocate memory on device for integer.
* It stores the number of elements in the compacted stream.
*/
int *d_new_n;
hipMalloc((void **)&d_new_n, sizeof(int));
/* Set up execution grid.
* Note: this implementation only supports a single thread-block worth of data.
*/
dim3 grid(1, 1);
dim3 threads(NUM_ELEMENTS, 1, 1);
hipLaunchKernelGGL(( compact_kernel), dim3(grid), dim3(threads), 0, 0, d_out, d_in, num_elements, d_new_n, d_flag);
hipDeviceSynchronize();
check_CUDA_error("KERNEL EXECUTION FAILURE");
/* Copy results from device to host */
hipMemcpy(&h_new_n, d_new_n, sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(h_flag, d_flag, num_elements * sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(h_data, d_out, h_new_n * sizeof(int), hipMemcpyDeviceToHost);
#ifdef DEBUG
print_scanned_elements(h_flag, num_elements);
print_elements(h_data, h_new_n);
#endif
printf("Number of elements in compacted stream = %d\n", h_new_n);
int result = check_results(reference, h_data, h_new_n);
printf("\nTEST %s\n", (0 == result) ? "PASSED" : "FAILED");
/* Cleanup memory */
free(h_data);
free(reference);
free(h_flag);
hipFree(d_new_n);
hipFree(d_in);
hipFree(d_out);
hipFree(d_flag);
exit(EXIT_SUCCESS);
}
void check_CUDA_error(const char *msg)
{
hipError_t err = hipGetLastError();
if (hipSuccess != err) {
printf("CUDA ERROR: %s (%s).\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/* Return random integer between [min, max] */
int rand_int(int min, int max)
{
float r = rand()/(float)RAND_MAX;
return (int)floorf(min + (max - min) * r);
}
/* Check GPU and CPU results. Return 0 on success, -1 otherwise */
int check_results(int *reference, int *gpu_result, int n)
{
int check = 0;
int i;
for (i = 0; i < n; i++)
if (reference[i] != gpu_result[i]) {
check = -1;
break;
}
return check;
}
void print_elements(int *in, int num_elements)
{
int i;
for (i = 0; i < num_elements; i++)
printf ("%d ", in[i]);
printf ("\n");
}
void print_scanned_elements(int *in, int num_elements)
{
int i;
for (i = 0; i < num_elements; i++)
printf ("%d ", in[i]);
printf ("\n");
}
|
25db66cf06936880307dbb718be467bfe8f732c9.cu
|
/* Host code for stream compaction.
 * The input stream is filtered to remove all values <= 0.
 * The output stream contains only positive values (> 0).
*
* Author: Naga Kandasamy
* Date created: May 12, 2019
* Date modified: May 30, 2020
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include <sys/time.h>
/* Uncomment the line below if you want debug information */
// #define DEBUG
#define NUM_ELEMENTS 1024
#define MIN_NUMBER -10
#define MAX_NUMBER 10
/* Include kernel */
#include "compact_kernel.cu"
void run_test(int);
extern "C" void compute_gold(int *, int *, int *);
void check_CUDA_error(const char *);
int check_results(int *, int *, int);
void print_elements(int *, int);
void print_scanned_elements(int *, int);
int rand_int(int, int);
int main(int argc, char **argv)
{
int num_elements = NUM_ELEMENTS;
run_test(num_elements);
exit(EXIT_SUCCESS);
}
void run_test(int num_elements)
{
/* Memory on host to store input data */
int mem_size = sizeof(int) * num_elements;
int *h_data = (int *)malloc(mem_size);
/* Initialize input data to random integer values in [MIN_NUMBER, MAX_NUMBER] */
printf("\nGenerating input stream of %d elements\n", num_elements);
srand(time (NULL));
int i;
for (i = 0; i < num_elements; ++i)
h_data[i] = rand_int(MIN_NUMBER, MAX_NUMBER);
#ifdef DEBUG
printf("\nOriginal stream\n");
print_elements(h_data, num_elements);
#endif
/* Compute reference solution */
printf("\nCompacting stream on CPU\n");
int *reference = (int *)malloc(mem_size);
int h_new_n = num_elements;
compute_gold(reference, h_data, &h_new_n);
#ifdef DEBUG
print_elements(reference, h_new_n);
#endif
printf("Number of elements in compacted stream = %d\n", h_new_n);
/* Allocate memory on device for input and output arrays */
printf("\nCompacting stream on GPU\n");
int *d_in, *d_out;
cudaMalloc((void **)&d_in, mem_size);
cudaMalloc((void **)&d_out, mem_size);
/* Copy input array to device */
cudaMemcpy(d_in, h_data, mem_size, cudaMemcpyHostToDevice);
/* Allocate memory on host and on device for the scanned flag array */
int *h_flag, *d_flag;
h_flag = (int *)malloc(num_elements * sizeof(int));
cudaMalloc((void **)&d_flag, num_elements * sizeof(int));
/* Allocate memory on device for integer.
* It stores the number of elements in the compacted stream.
*/
int *d_new_n;
cudaMalloc((void **)&d_new_n, sizeof(int));
/* Set up execution grid.
* Note: this implementation only supports a single thread-block worth of data.
*/
dim3 grid(1, 1);
dim3 threads(NUM_ELEMENTS, 1, 1);
compact_kernel<<<grid, threads>>>(d_out, d_in, num_elements, d_new_n, d_flag);
cudaDeviceSynchronize();
check_CUDA_error("KERNEL EXECUTION FAILURE");
/* Copy results from device to host */
cudaMemcpy(&h_new_n, d_new_n, sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(h_flag, d_flag, num_elements * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(h_data, d_out, h_new_n * sizeof(int), cudaMemcpyDeviceToHost);
#ifdef DEBUG
print_scanned_elements(h_flag, num_elements);
print_elements(h_data, h_new_n);
#endif
printf("Number of elements in compacted stream = %d\n", h_new_n);
int result = check_results(reference, h_data, h_new_n);
printf("\nTEST %s\n", (0 == result) ? "PASSED" : "FAILED");
/* Cleanup memory */
free(h_data);
free(reference);
free(h_flag);
cudaFree(d_new_n);
cudaFree(d_flag);
cudaFree(d_in);
cudaFree(d_out);
exit(EXIT_SUCCESS);
}
void check_CUDA_error(const char *msg)
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err) {
printf("CUDA ERROR: %s (%s).\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/* Return a random integer uniformly distributed in [min, max] */
int rand_int(int min, int max)
{
float r = rand()/(float)RAND_MAX;
int v = (int)floorf(min + (max - min + 1) * r);
return (v > max) ? max : v; /* clamp the rare r == 1.0 case */
}
/* Check GPU and CPU results. Return 0 on success, -1 otherwise */
int check_results(int *reference, int *gpu_result, int n)
{
int check = 0;
int i;
for (i = 0; i < n; i++)
if (reference[i] != gpu_result[i]) {
check = -1;
break;
}
return check;
}
void print_elements(int *in, int num_elements)
{
int i;
for (i = 0; i < num_elements; i++)
printf ("%0.2f ", in[i]);
printf ("\n");
}
void print_scanned_elements(int *in, int num_elements)
{
int i;
for (i = 0; i < num_elements; i++)
printf ("%d ", in[i]);
printf ("\n");
}
|
a2dbb90e7b1ac4f2e17e1415cba96ea1f03a05bd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <cmath>
#include <cassert>
#include <streaming/saxpy.hpp>
#include <util/cuda_vector.hpp>
#include <util/grid_stride.hpp>
#include <util/cuda_grid_config.hpp>
#include <util/cuda_error.hpp>
#include <util/cuda_init.hpp>
#include <rocblas.h>
#include <cub/hipcub/hipcub.hpp>
using cuda::grid_stride_range;
using cuda::util::getGridDimensions;
using cuda::util::lang::range;
template <typename T>
__global__
void saxpy_gpu_naive(const T *x,
const T *y,
T *z,
unsigned N, T alpha) {
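// Grid-stride loop: each thread processes i, i + stride, i + 2*stride, ...
// with stride = blockDim.x * gridDim.x, so the kernel is correct for any
// grid size relative to N.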
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < N;
i += blockDim.x * gridDim.x) {
z[i] = alpha * x[i] + y[i];
}
}
template <typename T>
__global__
void saxpy_gpu_naive_rangebasedloop(const T *x,
const T *y,
T *z,
unsigned N, T alpha) {
for (auto i : grid_stride_range<unsigned>(0, N) ) {
z[i] = alpha * x[i] + y[i];
printf("blockIdx.x %d - blockDim.x %d - threadIdx.x %d = %d\n",
blockIdx.x, blockDim.x, threadIdx.x, i);
}
}
template <typename T>
__global__
void saxpy_gpu_c_vector(const cuda::vector<T>& x, const cuda::vector<T>& y, cuda::vector<T>& z, unsigned N, T alpha) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < N;
i += blockDim.x * gridDim.x) {
z[i] = alpha * x[i] + y[i];
}
}
template <typename T>
__global__
void saxpy_gpu_cpp_vector(const cuda::vector<T>& x, const cuda::vector<T>& y, cuda::vector<T>& z, unsigned N, T alpha) {
for (auto i : grid_stride_range<unsigned>(0, N) ) {
z[i] = alpha * x[i] + y[i];
}
}
template <typename T, const int unroll>
__global__
void saxpy_gpu_unroll(const T *x,
const T *y,
T *z,
unsigned N, T alpha) {
T x_reg[unroll], y_reg[unroll];
unsigned i;
for ( i = unroll * blockIdx.x * blockDim.x + threadIdx.x;
i < N - unroll * blockDim.x * gridDim.x;
i += unroll * blockDim.x * gridDim.x ) {
#pragma unroll
for (int j = 0; j < unroll; j++) {
unsigned gindex = j * blockDim.x + i;
x_reg[j] = x[gindex];
y_reg[j] = y[gindex];
}
#pragma unroll
for (int j = 0; j < unroll; j++) {
unsigned gindex = j * blockDim.x + i;
z[gindex] = alpha * x_reg[j] + y_reg[j];
}
}
// To keep the (gindex < N) conditional out of the hot loop above, the last
// tile is handled here with explicit bounds checks. (The loop bound above
// also assumes N >= unroll * blockDim.x * gridDim.x, else it underflows.)
#pragma unroll
for (int j = 0; j < unroll; j++) {
unsigned gindex = j * blockDim.x + i;
if (gindex < N) {
x_reg[j] = x[gindex];
y_reg[j] = y[gindex];
}
}
#pragma unroll
for (int j = 0; j < unroll; j++) {
unsigned gindex = j * blockDim.x + i;
if (gindex < N)
z[gindex] = alpha * x_reg[j] + y_reg[j];
}
}
template <typename T, const int unroll>
__global__
void saxpy_gpu_cpp_vector_unroll(const cuda::vector<T>& x, const cuda::vector<T>& y, cuda::vector<T>& z, unsigned N, T alpha) {
T x_reg[unroll], y_reg[unroll];
auto i_last = 0;
for (auto i : grid_stride_range<unsigned>(0, N, unroll) ) {
for (auto j : range<unsigned>(0, unroll)) {
unsigned gindex = j * blockDim.x + i;
x_reg[j] = x[gindex];
y_reg[j] = y[gindex];
}
for (auto j : range(0, unroll)) {
unsigned gindex = j * blockDim.x + i;
z[gindex] = alpha * x_reg[j] + y_reg[j];
}
i_last = i;
}
// Re-run the tile at i_last with explicit (gindex < N) bounds checks to
// cover the elements the unguarded loop above may have touched out of range.
for (auto j : range<unsigned>(0, unroll)) {
unsigned gindex = j * blockDim.x + i_last;
if (gindex < N) {
x_reg[j] = x[gindex];
y_reg[j] = y[gindex];
}
}
for (auto j : range<unsigned>(0, unroll)) {
unsigned gindex = j * blockDim.x + i_last;
if (gindex < N)
z[gindex] = alpha * x_reg[j] + y_reg[j];
}
}
template <typename T, const unsigned blockDimx, const int unroll>
__global__
void saxpy_gpu_cub(const T* x,
const T* y,
T* z,
unsigned N, T alpha) {
using BlockLoad = cub::BlockLoad<const T*, blockDimx, unroll, cub::BLOCK_LOAD_WARP_TRANSPOSE>;
using BlockStore = cub::BlockStore<T*, blockDimx, unroll, cub::BLOCK_STORE_WARP_TRANSPOSE>;
__shared__ union
{
typename BlockLoad::TempStorage load_x;
typename BlockLoad::TempStorage load_y;
typename BlockStore::TempStorage store;
} storage_smem;
T x_reg[unroll], y_reg[unroll], z_reg[unroll];
BlockLoad(storage_smem.load_x).Load(x, x_reg, N);
__syncthreads(); // load_x and load_y alias the same union storage; sync before reuse
BlockLoad(storage_smem.load_y).Load(y, y_reg, N);
__syncthreads();
for (int i = 0; i < unroll; i++)
z_reg[i] = alpha * x_reg[i] + y_reg[i];
BlockStore(storage_smem.store).Store(z, z_reg, N);
};
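// With blockDimx = 128 and unroll = 4, each block loads, transforms, and
// stores a tile of 512 contiguous elements; the WARP_TRANSPOSE staging keeps
// global loads/stores coalesced while per-thread items stay in registers.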
/*
template <typename T, const unsigned blockDimx, const unsigned blockDimy, const int unroll>
__global__
void test_cub_2d(const T* x, const T* y, T* z, unsigned N, T alpha) {
using BlockLoad = cub::BlockLoad<const T*, blockDimx, unroll, cub::BLOCK_LOAD_WARP_TRANSPOSE, blockDimy>;
using BlockStore = cub::BlockStore<T*, blockDimx, unroll, cub::BLOCK_STORE_WARP_TRANSPOSE, blockDimy>;
__shared__ union
{
typename BlockLoad::TempStorage load_x;
typename BlockLoad::TempStorage load_y;
typename BlockStore::TempStorage store;
} storage_smem;
T x_reg[unroll*unroll], y_reg[unroll*unroll], z_reg[unroll*unroll];
BlockLoad(storage_smem.load_x).Load(x, x_reg);
BlockLoad(storage_smem.load_y).Load(y, y_reg);
__syncthreads();
for (int i = 0; i < unroll; i++) {
for (int j = 0; j < unroll; j++) {
const unsigned index = i * unroll + j;
z_reg[index] = alpha * x_reg[index] + y_reg[index];
}
}
BlockStore(storage_smem.store).Store(z, z_reg);
};
*/
////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////
template <typename T>
void run_saxpy_c(const T* px,
const T* py,
T* pz,
unsigned N,
T alpha,
unsigned repetitions) {
cuda::error err;
unsigned block_size_x = 128;
unsigned block_size_y = 1;
unsigned block_size_z = 1;
dim3 dimGrid = getGridDimensions(N, 1, 1, block_size_x, block_size_y, block_size_z);
dim3 dimBlock( block_size_x, block_size_y, block_size_z );
unsigned block_size2_x = 128;
unsigned block_size2_y = 1;
unsigned block_size2_z = 1;
const unsigned unroll2 = 2;
dim3 dimGrid2 = getGridDimensions(N/unroll2, 1, 1, block_size2_x, block_size2_y, block_size2_z);
dim3 dimBlock2( block_size2_x, block_size2_y, block_size2_z );
unsigned block_size4_x = 128;
unsigned block_size4_y = 1;
unsigned block_size4_z = 1;
const unsigned unroll4 = 4;
dim3 dimGrid4 = getGridDimensions(N/unroll4, 1, 1, block_size4_x, block_size4_y, block_size4_z);
dim3 dimBlock4( block_size4_x, block_size4_y, block_size4_z );
std::cout << "Launching saxpy C kernels" << std::endl;
std::cout << "Grid 1 [" << dimGrid.x << "," << dimGrid.y << "," << dimGrid.z << "]" << std::endl;
std::cout << "Grid 2 [" << dimGrid2.x << "," << dimGrid2.y << "," << dimGrid2.z << "]" << std::endl;
std::cout << "Grid 4 [" << dimGrid4.x << "," << dimGrid4.y << "," << dimGrid4.z << "]" << std::endl;
std::cout << "Block 1 [" << dimBlock.x << "," << dimBlock.y << "," << dimBlock.z << "]" << std::endl;
std::cout << "Block 2 [" << dimBlock2.x << "," << dimBlock2.y << "," << dimBlock2.z << "]" << std::endl;
std::cout << "Block 4 [" << dimBlock4.x << "," << dimBlock4.y << "," << dimBlock4.z << "]" << std::endl;
for (int i = 0; i < repetitions; i++) {
hipLaunchKernelGGL(( saxpy_gpu_naive<T>), dim3(dimGrid), dim3(dimBlock), 0, 0, px, py, pz, N, alpha);
err = hipGetLastError();
hipLaunchKernelGGL(( saxpy_gpu_naive_rangebasedloop<T>), dim3(dimGrid), dim3(dimBlock), 0, 0, px, py, pz, N, alpha);
err = hipGetLastError();
hipLaunchKernelGGL(( saxpy_gpu_unroll<T, unroll2>), dim3(dimGrid2), dim3(dimBlock2), 0, 0, px, py, pz, N, alpha);
err = hipGetLastError();
hipLaunchKernelGGL(( saxpy_gpu_unroll<T, unroll4>), dim3(dimGrid4), dim3(dimBlock4), 0, 0, px, py, pz, N, alpha);
err = hipGetLastError();
}
std::cout << std::endl;
err = hipDeviceSynchronize();
}
template <typename T>
void run_saxpy_cpp(const cuda::vector<T>& x,
const cuda::vector<T>& y,
cuda::vector<T>& z,
unsigned N,
T alpha,
unsigned repetitions) {
cuda::error err;
unsigned block_size_x = 128;
unsigned block_size_y = 1;
unsigned block_size_z = 1;
dim3 dimGrid = getGridDimensions(N, 1, 1, block_size_x, block_size_y, block_size_z);
dim3 dimBlock( block_size_x, block_size_y, block_size_z );
unsigned block_size2_x = 128;
unsigned block_size2_y = 1;
unsigned block_size2_z = 1;
const unsigned unroll2 = 2;
dim3 dimGrid2 = getGridDimensions(N/unroll2, 1, 1, block_size2_x, block_size2_y, block_size2_z);
dim3 dimBlock2( block_size2_x, block_size2_y, block_size2_z );
unsigned block_size4_x = 128;
unsigned block_size4_y = 1;
unsigned block_size4_z = 1;
const unsigned unroll4 = 4;
dim3 dimGrid4 = getGridDimensions(N/unroll4, 1, 1, block_size4_x, block_size4_y, block_size4_z);
dim3 dimBlock4( block_size4_x, block_size4_y, block_size4_z );
std::cout << "Launching saxpy CPP kernels" << std::endl;
std::cout << "Grid 1 [" << dimGrid.x << "," << dimGrid.y << "," << dimGrid.z << "]" << std::endl;
std::cout << "Grid 2 [" << dimGrid2.x << "," << dimGrid2.y << "," << dimGrid2.z << "]" << std::endl;
std::cout << "Grid 4 [" << dimGrid4.x << "," << dimGrid4.y << "," << dimGrid4.z << "]" << std::endl;
std::cout << "Block 1 [" << dimBlock.x << "," << dimBlock.y << "," << dimBlock.z << "]" << std::endl;
std::cout << "Block 2 [" << dimBlock2.x << "," << dimBlock2.y << "," << dimBlock2.z << "]" << std::endl;
std::cout << "Block 4 [" << dimBlock4.x << "," << dimBlock4.y << "," << dimBlock4.z << "]" << std::endl;
for (int i = 0; i < repetitions; i++) {
hipLaunchKernelGGL(( saxpy_gpu_cpp_vector<T>), dim3(dimGrid), dim3(dimBlock), 0, 0, x, y, z, N, alpha);
err = hipGetLastError();
hipLaunchKernelGGL(( saxpy_gpu_cpp_vector_unroll<T, unroll2>), dim3(dimGrid2), dim3(dimBlock2), 0, 0, x, y, z, N, alpha);
err = hipGetLastError();
hipLaunchKernelGGL(( saxpy_gpu_cpp_vector_unroll<T, unroll4>), dim3(dimGrid4), dim3(dimBlock4), 0, 0, x, y, z, N, alpha);
err = hipGetLastError();
}
std::cout << std::endl;
err = hipDeviceSynchronize();
}
template <typename T>
void run_saxpy_cublas(const T* px,
T* py,
unsigned N,
const T alpha,
unsigned repetitions);
template <>
void run_saxpy_cublas(const float* px,
float* py,
unsigned N,
const float alpha,
unsigned repetitions) {
hipblasStatus_t status;
hipblasHandle_t handle;
std::cout << "Launching saxpy CUBLAS" << std::endl;
status = hipblasCreate(&handle);
assert(status == HIPBLAS_STATUS_SUCCESS);
for (int i = 0; i < repetitions; i++) {
status = hipblasSaxpy(handle, N, &alpha, px, 1, py, 1);
assert(status == HIPBLAS_STATUS_SUCCESS);
}
status = hipblasDestroy(handle); /* release the handle to avoid a resource leak */
assert(status == HIPBLAS_STATUS_SUCCESS);
std::cout << std::endl;
}
template <>
void run_saxpy_cublas(const double* px,
double* py,
unsigned N,
const double alpha,
unsigned repetitions) {
hipblasStatus_t status;
hipblasHandle_t handle;
std::cout << "Launching saxpy CUBLAS" << std::endl;
status = hipblasCreate(&handle);
assert(status == HIPBLAS_STATUS_SUCCESS);
for (int i = 0; i < repetitions; i++) {
status = hipblasDaxpy(handle, N, &alpha, px, 1, py, 1);
assert(status == HIPBLAS_STATUS_SUCCESS);
}
status = hipblasDestroy(handle); /* release the handle to avoid a resource leak */
assert(status == HIPBLAS_STATUS_SUCCESS);
std::cout << std::endl;
}
template <typename T>
void run_saxpy_cub(const T* px,
const T* py,
T* pz,
unsigned N,
T alpha,
unsigned repetitions) {
cuda::error err;
unsigned block_size_x = 128;
unsigned block_size_y = 1;
unsigned block_size_z = 1;
dim3 dimGrid = getGridDimensions(N, 1, 1, block_size_x, block_size_y, block_size_z);
dim3 dimBlock( block_size_x, block_size_y, block_size_z );
unsigned block_size2_x = 128;
unsigned block_size2_y = 1;
unsigned block_size2_z = 1;
const unsigned unroll2 = 2;
dim3 dimGrid2 = getGridDimensions(N/unroll2, 1, 1, block_size2_x, block_size2_y, block_size2_z);
dim3 dimBlock2( block_size2_x, block_size2_y, block_size2_z );
unsigned block_size4_x = 128;
unsigned block_size4_y = 1;
unsigned block_size4_z = 1;
const unsigned unroll4 = 4;
dim3 dimGrid4 = getGridDimensions(N/unroll4, 1, 1, block_size4_x, block_size4_y, block_size4_z);
dim3 dimBlock4( block_size4_x, block_size4_y, block_size4_z );
std::cout << "Launching saxpy CUB kernels" << std::endl;
std::cout << "Grid 1 [" << dimGrid.x << "," << dimGrid.y << "," << dimGrid.z << "]" << std::endl;
std::cout << "Grid 2 [" << dimGrid2.x << "," << dimGrid2.y << "," << dimGrid2.z << "]" << std::endl;
std::cout << "Grid 4 [" << dimGrid4.x << "," << dimGrid4.y << "," << dimGrid4.z << "]" << std::endl;
std::cout << "Block 1 [" << dimBlock.x << "," << dimBlock.y << "," << dimBlock.z << "]" << std::endl;
std::cout << "Block 2 [" << dimBlock2.x << "," << dimBlock2.y << "," << dimBlock2.z << "]" << std::endl;
std::cout << "Block 4 [" << dimBlock4.x << "," << dimBlock4.y << "," << dimBlock4.z << "]" << std::endl;
for (int i = 0; i < repetitions; i++) {
hipLaunchKernelGGL(( saxpy_gpu_cub<T, 128, 1>), dim3(dimGrid), dim3(dimBlock), 0, 0, px, py, pz, N, alpha);
err = hipGetLastError();
hipLaunchKernelGGL(( saxpy_gpu_cub<T, 128, unroll2>), dim3(dimGrid2), dim3(dimBlock2), 0, 0, px, py, pz, N, alpha);
err = hipGetLastError();
hipLaunchKernelGGL(( saxpy_gpu_cub<T, 128, unroll4>), dim3(dimGrid4), dim3(dimBlock4), 0, 0, px, py, pz, N, alpha);
err = hipGetLastError();
}
std::cout << std::endl;
err = hipDeviceSynchronize();
}
template <typename T>
void saxpy_c(cuda::device& gpu, unsigned N, unsigned repetitions) {
try {
cuda::error err;
cout << "--- SAXPY C ---" << endl;
size_t free, total;
gpu.getMemInfo(free, total);
size_t allocated_by_os = total - free;
size_t est_program_alloc = 3 * N * sizeof(T);
T *px, *py, *pz;
err = hipMalloc((void**)&px, N * sizeof(T));
err = hipMalloc((void**)&py, N * sizeof(T));
err = hipMalloc((void**)&pz, N * sizeof(T));
T alpha = 0.8;
gpu.getMemInfo(free, total);
cout << "Free mem: " << free/(1024*1024) << " / " << total/(1024*1024) << " MB" << endl;
size_t real_program_alloc = total - free - allocated_by_os;
float factor = (real_program_alloc - est_program_alloc)/ static_cast<float>(real_program_alloc);
cout << "Mem allocated by os: " << allocated_by_os/(1024*1024) << " MB" << endl;
cout << "Mem allocated by program[Est]: " << est_program_alloc/(1024*1024) << " MB" << endl;
cout << "Mem allocated by program[Real]: " << real_program_alloc/(1024*1024) << " MB" << endl;
cout << "Difference " << factor << endl;
run_saxpy_c(px, py, pz, N, alpha, repetitions);
err = hipFree(px);
err = hipFree(py);
err = hipFree(pz);
} catch(const cuda::cuda_exception& error) {
std::cout << error.what() << std::endl;
}
}
template <typename T>
void saxpy_cpp(cuda::device& gpu, unsigned N, unsigned repetitions) {
try {
cuda::error err;
cout << "--- SAXPY Cpp ---" << endl;
size_t free, total, allocated_by_os;
gpu.getMemInfo(free, total);
allocated_by_os = total - free;
cuda::vector<T> *a = new cuda::vector<T>(N);
cuda::vector<T> *b = new cuda::vector<T>(N);
cuda::vector<T> *c = new cuda::vector<T>(N);
T alpha = 0.8;
gpu.getMemInfo(free, total);
cout << "Free mem: " << free/(1024*1024) << " / " << total/(1024*1024) << " MB" << endl;
size_t est_program_alloc = 3 * N * sizeof(T);
size_t real_program_alloc = total - free - allocated_by_os;
float factor = (real_program_alloc - est_program_alloc)/ static_cast<float>(real_program_alloc);
cout << "Mem allocated by os: " << allocated_by_os/(1024*1024) << " MB" << endl;
cout << "Mem allocated by program[Real]: " << real_program_alloc/(1024*1024) << " MB" << endl;
cout << "Mem allocated by program[Est]: " << est_program_alloc/(1024*1024) << " MB" << endl;
cout << "Difference " << factor << endl;
run_saxpy_cpp(*a, *b, *c, N, alpha, repetitions);
delete(a);
delete(b);
delete(c);
} catch(const cuda::cuda_exception& error) {
std::cout << error.what() << std::endl;
}
}
template <typename T>
void saxpy_cublas(cuda::device& gpu, unsigned N, unsigned repetitions) {
try {
cuda::error err;
cout << "--- SAXPY Cublas ---" << endl;
size_t free, total, allocated_by_os;
gpu.getMemInfo(free, total);
allocated_by_os = total - free;
T *px, *py;
err = hipMalloc((void**)&px, N * sizeof(T));
err = hipMalloc((void**)&py, N * sizeof(T));
const T alpha = 0.8;
gpu.getMemInfo(free, total);
cout << "Free mem: " << free/(1024*1024) << " / " << total/(1024*1024) << " MB" << endl;
size_t est_program_alloc = 2 * N * sizeof(T);
size_t real_program_alloc = total - free - allocated_by_os;
float factor = (real_program_alloc - est_program_alloc)/ static_cast<float>(real_program_alloc);
cout << "Mem allocated by os: " << allocated_by_os/(1024*1024) << " MB" << endl;
cout << "Mem allocated by program[Real]: " << real_program_alloc/(1024*1024) << " MB" << endl;
cout << "Mem allocated by program[Est]: " << est_program_alloc/(1024*1024) << " MB" << endl;
cout << "Difference " << factor << endl;
run_saxpy_cublas(px, py, N, alpha, repetitions);
err = hipFree(px);
err = hipFree(py);
} catch(const cuda::cuda_exception& error) {
std::cout << error.what() << std::endl;
}
}
template <typename T>
void saxpy_cub(cuda::device& gpu, unsigned N, unsigned repetitions) {
try {
cuda::error err;
cout << "--- SAXPY CUB ---" << endl;
size_t free, total, allocated_by_os;
gpu.getMemInfo(free, total);
allocated_by_os = total - free;
T *px, *py, *pz;
err = hipMalloc((void**)&px, N * sizeof(T));
err = hipMalloc((void**)&py, N * sizeof(T));
err = hipMalloc((void**)&pz, N * sizeof(T));
T alpha = 0.8;
gpu.getMemInfo(free, total);
cout << "Free mem: " << free/(1024*1024) << " / " << total/(1024*1024) << " MB" << endl;
size_t est_program_alloc = 3 * N * sizeof(T);
size_t real_program_alloc = total - free - allocated_by_os;
float factor = (real_program_alloc - est_program_alloc)/ static_cast<float>(real_program_alloc);
cout << "Mem allocated by os: " << allocated_by_os/(1024*1024) << " MB" << endl;
cout << "Mem allocated by program[Real]: " << real_program_alloc/(1024*1024) << " MB" << endl;
cout << "Mem allocated by program[Est]: " << est_program_alloc/(1024*1024) << " MB" << endl;
cout << "Difference " << factor << endl;
run_saxpy_cub(px, py, pz, N, alpha, repetitions);
err = hipFree(px);
err = hipFree(py);
err = hipFree(pz);
} catch(const cuda::cuda_exception& error) {
std::cout << error.what() << std::endl;
}
}
void launch_saxpy(cuda::device& gpu, unsigned N, unsigned repetitions)
{
saxpy_c<float>(gpu, N, repetitions);
//saxpy_cpp<float>(gpu, N, repetitions);
saxpy_cublas<float>(gpu, N, repetitions);
saxpy_cub<float>(gpu, N, repetitions);
saxpy_c<double>(gpu, N, repetitions);
//saxpy_cpp<double>(gpu, N, repetitions);
saxpy_cublas<double>(gpu, N, repetitions);
saxpy_cub<double>(gpu, N, repetitions);
}
|
a2dbb90e7b1ac4f2e17e1415cba96ea1f03a05bd.cu
|
#include <iostream>
#include <cmath>
#include <cassert>
#include <streaming/saxpy.hpp>
#include <util/cuda_vector.hpp>
#include <util/grid_stride.hpp>
#include <util/cuda_grid_config.hpp>
#include <util/cuda_error.hpp>
#include <util/cuda_init.hpp>
#include <cublas_v2.h>
#include <cub/cub/cub.cuh>
using cuda::grid_stride_range;
using cuda::util::getGridDimensions;
using cuda::util::lang::range;
template <typename T>
__global__
void saxpy_gpu_naive(const T *x,
const T *y,
T *z,
unsigned N, T alpha) {
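// Grid-stride loop: each thread processes i, i + stride, i + 2*stride, ...
// with stride = blockDim.x * gridDim.x, so the kernel is correct for any
// grid size relative to N.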
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < N;
i += blockDim.x * gridDim.x) {
z[i] = alpha * x[i] + y[i];
}
}
template <typename T>
__global__
void saxpy_gpu_naive_rangebasedloop(const T *x,
const T *y,
T *z,
unsigned N, T alpha) {
for (auto i : grid_stride_range<unsigned>(0, N) ) {
z[i] = alpha * x[i] + y[i];
printf("blockIdx.x %d - blockDim.x %d - threadIdx.x %d = %d\n",
blockIdx.x, blockDim.x, threadIdx.x, i);
}
}
template <typename T>
__global__
void saxpy_gpu_c_vector(const cuda::vector<T>& x, const cuda::vector<T>& y, cuda::vector<T>& z, unsigned N, T alpha) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < N;
i += blockDim.x * gridDim.x) {
z[i] = alpha * x[i] + y[i];
}
}
template <typename T>
__global__
void saxpy_gpu_cpp_vector(const cuda::vector<T>& x, const cuda::vector<T>& y, cuda::vector<T>& z, unsigned N, T alpha) {
for (auto i : grid_stride_range<unsigned>(0, N) ) {
z[i] = alpha * x[i] + y[i];
}
}
template <typename T, const int unroll>
__global__
void saxpy_gpu_unroll(const T *x,
const T *y,
T *z,
unsigned N, T alpha) {
T x_reg[unroll], y_reg[unroll];
unsigned i;
for ( i = unroll * blockIdx.x * blockDim.x + threadIdx.x;
i < N - unroll * blockDim.x * gridDim.x;
i += unroll * blockDim.x * gridDim.x ) {
#pragma unroll
for (int j = 0; j < unroll; j++) {
unsigned gindex = j * blockDim.x + i;
x_reg[j] = x[gindex];
y_reg[j] = y[gindex];
}
#pragma unroll
for (int j = 0; j < unroll; j++) {
unsigned gindex = j * blockDim.x + i;
z[gindex] = alpha * x_reg[j] + y_reg[j];
}
}
// To keep the (gindex < N) conditional out of the hot loop above, the last
// tile is handled here with explicit bounds checks. (The loop bound above
// also assumes N >= unroll * blockDim.x * gridDim.x, else it underflows.)
#pragma unroll
for (int j = 0; j < unroll; j++) {
unsigned gindex = j * blockDim.x + i;
if (gindex < N) {
x_reg[j] = x[gindex];
y_reg[j] = y[gindex];
}
}
#pragma unroll
for (int j = 0; j < unroll; j++) {
unsigned gindex = j * blockDim.x + i;
if (gindex < N)
z[gindex] = alpha * x_reg[j] + y_reg[j];
}
}
template <typename T, const int unroll>
__global__
void saxpy_gpu_cpp_vector_unroll(const cuda::vector<T>& x, const cuda::vector<T>& y, cuda::vector<T>& z, unsigned N, T alpha) {
T x_reg[unroll], y_reg[unroll];
auto i_last = 0;
for (auto i : grid_stride_range<unsigned>(0, N, unroll) ) {
for (auto j : range<unsigned>(0, unroll)) {
unsigned gindex = j * blockDim.x + i;
x_reg[j] = x[gindex];
y_reg[j] = y[gindex];
}
for (auto j : range(0, unroll)) {
unsigned gindex = j * blockDim.x + i;
z[gindex] = alpha * x_reg[j] + y_reg[j];
}
i_last = i;
}
// Re-run the tile at i_last with explicit (gindex < N) bounds checks to
// cover the elements the unguarded loop above may have touched out of range.
for (auto j : range<unsigned>(0, unroll)) {
unsigned gindex = j * blockDim.x + i_last;
if (gindex < N) {
x_reg[j] = x[gindex];
y_reg[j] = y[gindex];
}
}
for (auto j : range<unsigned>(0, unroll)) {
unsigned gindex = j * blockDim.x + i_last;
if (gindex < N)
z[gindex] = alpha * x_reg[j] + y_reg[j];
}
}
template <typename T, const unsigned blockDimx, const int unroll>
__global__
void saxpy_gpu_cub(const T* x,
const T* y,
T* z,
unsigned N, T alpha) {
using BlockLoad = cub::BlockLoad<const T*, blockDimx, unroll, cub::BLOCK_LOAD_WARP_TRANSPOSE>;
using BlockStore = cub::BlockStore<T*, blockDimx, unroll, cub::BLOCK_STORE_WARP_TRANSPOSE>;
__shared__ union
{
typename BlockLoad::TempStorage load_x;
typename BlockLoad::TempStorage load_y;
typename BlockStore::TempStorage store;
} storage_smem;
T x_reg[unroll], y_reg[unroll], z_reg[unroll];
BlockLoad(storage_smem.load_x).Load(x, x_reg, N);
__syncthreads(); // load_x and load_y alias the same union storage; sync before reuse
BlockLoad(storage_smem.load_y).Load(y, y_reg, N);
__syncthreads();
for (int i = 0; i < unroll; i++)
z_reg[i] = alpha * x_reg[i] + y_reg[i];
BlockStore(storage_smem.store).Store(z, z_reg, N);
};
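// With blockDimx = 128 and unroll = 4, each block loads, transforms, and
// stores a tile of 512 contiguous elements; the WARP_TRANSPOSE staging keeps
// global loads/stores coalesced while per-thread items stay in registers.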
/*
template <typename T, const unsigned blockDimx, const unsigned blockDimy, const int unroll>
__global__
void test_cub_2d(const T* x, const T* y, T* z, unsigned N, T alpha) {
using BlockLoad = cub::BlockLoad<const T*, blockDimx, unroll, cub::BLOCK_LOAD_WARP_TRANSPOSE, blockDimy>;
using BlockStore = cub::BlockStore<T*, blockDimx, unroll, cub::BLOCK_STORE_WARP_TRANSPOSE, blockDimy>;
__shared__ union
{
typename BlockLoad::TempStorage load_x;
typename BlockLoad::TempStorage load_y;
typename BlockStore::TempStorage store;
} storage_smem;
T x_reg[unroll*unroll], y_reg[unroll*unroll], z_reg[unroll*unroll];
BlockLoad(storage_smem.load_x).Load(x, x_reg);
BlockLoad(storage_smem.load_y).Load(y, y_reg);
__syncthreads();
for (int i = 0; i < unroll; i++) {
for (int j = 0; j < unroll; j++) {
const unsigned index = i * unroll + j;
z_reg[index] = alpha * x_reg[index] + y_reg[index];
}
}
BlockStore(storage_smem.store).Store(z, z_reg);
};
*/
////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////
template <typename T>
void run_saxpy_c(const T* px,
const T* py,
T* pz,
unsigned N,
T alpha,
unsigned repetitions) {
cuda::error err;
unsigned block_size_x = 128;
unsigned block_size_y = 1;
unsigned block_size_z = 1;
dim3 dimGrid = getGridDimensions(N, 1, 1, block_size_x, block_size_y, block_size_z);
dim3 dimBlock( block_size_x, block_size_y, block_size_z );
unsigned block_size2_x = 128;
unsigned block_size2_y = 1;
unsigned block_size2_z = 1;
const unsigned unroll2 = 2;
dim3 dimGrid2 = getGridDimensions(N/unroll2, 1, 1, block_size2_x, block_size2_y, block_size2_z);
dim3 dimBlock2( block_size2_x, block_size2_y, block_size2_z );
unsigned block_size4_x = 128;
unsigned block_size4_y = 1;
unsigned block_size4_z = 1;
const unsigned unroll4 = 4;
dim3 dimGrid4 = getGridDimensions(N/unroll4, 1, 1, block_size4_x, block_size4_y, block_size4_z);
dim3 dimBlock4( block_size4_x, block_size4_y, block_size4_z );
std::cout << "Launching saxpy C kernels" << std::endl;
std::cout << "Grid 1 [" << dimGrid.x << "," << dimGrid.y << "," << dimGrid.z << "]" << std::endl;
std::cout << "Grid 2 [" << dimGrid2.x << "," << dimGrid2.y << "," << dimGrid2.z << "]" << std::endl;
std::cout << "Grid 4 [" << dimGrid4.x << "," << dimGrid4.y << "," << dimGrid4.z << "]" << std::endl;
std::cout << "Block 1 [" << dimBlock.x << "," << dimBlock.y << "," << dimBlock.z << "]" << std::endl;
std::cout << "Block 2 [" << dimBlock2.x << "," << dimBlock2.y << "," << dimBlock2.z << "]" << std::endl;
std::cout << "Block 4 [" << dimBlock4.x << "," << dimBlock4.y << "," << dimBlock4.z << "]" << std::endl;
for (int i = 0; i < repetitions; i++) {
saxpy_gpu_naive<T><<<dimGrid, dimBlock>>>(px, py, pz, N, alpha);
err = cudaGetLastError();
saxpy_gpu_naive_rangebasedloop<T><<<dimGrid, dimBlock>>>(px, py, pz, N, alpha);
err = cudaGetLastError();
saxpy_gpu_unroll<T, unroll2><<<dimGrid2, dimBlock2>>>(px, py, pz, N, alpha);
err = cudaGetLastError();
saxpy_gpu_unroll<T, unroll4><<<dimGrid4, dimBlock4>>>(px, py, pz, N, alpha);
err = cudaGetLastError();
}
std::cout << std::endl;
err = cudaDeviceSynchronize();
}
template <typename T>
void run_saxpy_cpp(const cuda::vector<T>& x,
const cuda::vector<T>& y,
cuda::vector<T>& z,
unsigned N,
T alpha,
unsigned repetitions) {
cuda::error err;
unsigned block_size_x = 128;
unsigned block_size_y = 1;
unsigned block_size_z = 1;
dim3 dimGrid = getGridDimensions(N, 1, 1, block_size_x, block_size_y, block_size_z);
dim3 dimBlock( block_size_x, block_size_y, block_size_z );
unsigned block_size2_x = 128;
unsigned block_size2_y = 1;
unsigned block_size2_z = 1;
const unsigned unroll2 = 2;
dim3 dimGrid2 = getGridDimensions(N/unroll2, 1, 1, block_size2_x, block_size2_y, block_size2_z);
dim3 dimBlock2( block_size2_x, block_size2_y, block_size2_z );
unsigned block_size4_x = 128;
unsigned block_size4_y = 1;
unsigned block_size4_z = 1;
const unsigned unroll4 = 4;
dim3 dimGrid4 = getGridDimensions(N/unroll4, 1, 1, block_size4_x, block_size4_y, block_size4_z);
dim3 dimBlock4( block_size4_x, block_size4_y, block_size4_z );
std::cout << "Launching saxpy CPP kernels" << std::endl;
std::cout << "Grid 1 [" << dimGrid.x << "," << dimGrid.y << "," << dimGrid.z << "]" << std::endl;
std::cout << "Grid 2 [" << dimGrid2.x << "," << dimGrid2.y << "," << dimGrid2.z << "]" << std::endl;
std::cout << "Grid 4 [" << dimGrid4.x << "," << dimGrid4.y << "," << dimGrid4.z << "]" << std::endl;
std::cout << "Block 1 [" << dimBlock.x << "," << dimBlock.y << "," << dimBlock.z << "]" << std::endl;
std::cout << "Block 2 [" << dimBlock2.x << "," << dimBlock2.y << "," << dimBlock2.z << "]" << std::endl;
std::cout << "Block 4 [" << dimBlock4.x << "," << dimBlock4.y << "," << dimBlock4.z << "]" << std::endl;
for (int i = 0; i < repetitions; i++) {
saxpy_gpu_cpp_vector<T><<<dimGrid, dimBlock>>>(x, y, z, N, alpha);
err = cudaGetLastError();
saxpy_gpu_cpp_vector_unroll<T, unroll2><<<dimGrid2, dimBlock2>>>(x, y, z, N, alpha);
err = cudaGetLastError();
saxpy_gpu_cpp_vector_unroll<T, unroll4><<<dimGrid4, dimBlock4>>>(x, y, z, N, alpha);
err = cudaGetLastError();
}
std::cout << std::endl;
err = cudaDeviceSynchronize();
}
template <typename T>
void run_saxpy_cublas(const T* px,
T* py,
unsigned N,
const T alpha,
unsigned repetitions);
template <>
void run_saxpy_cublas(const float* px,
float* py,
unsigned N,
const float alpha,
unsigned repetitions) {
cublasStatus_t status;
cublasHandle_t handle;
std::cout << "Launching saxpy CUBLAS" << std::endl;
status = cublasCreate(&handle);
assert(status == CUBLAS_STATUS_SUCCESS);
for (int i = 0; i < repetitions; i++) {
status = cublasSaxpy(handle, N, &alpha, px, 1, py, 1);
assert(status == CUBLAS_STATUS_SUCCESS);
}
status = cublasDestroy(handle); /* release the handle to avoid a resource leak */
assert(status == CUBLAS_STATUS_SUCCESS);
std::cout << std::endl;
}
template <>
void run_saxpy_cublas(const double* px,
double* py,
unsigned N,
const double alpha,
unsigned repetitions) {
cublasStatus_t status;
cublasHandle_t handle;
std::cout << "Launching saxpy CUBLAS" << std::endl;
status = cublasCreate(&handle);
assert(status == CUBLAS_STATUS_SUCCESS);
for (int i = 0; i < repetitions; i++) {
status = cublasDaxpy(handle, N, &alpha, px, 1, py, 1);
assert(status == CUBLAS_STATUS_SUCCESS);
}
status = cublasDestroy(handle); /* release the handle to avoid a resource leak */
assert(status == CUBLAS_STATUS_SUCCESS);
std::cout << std::endl;
}
template <typename T>
void run_saxpy_cub(const T* px,
const T* py,
T* pz,
unsigned N,
T alpha,
unsigned repetitions) {
cuda::error err;
unsigned block_size_x = 128;
unsigned block_size_y = 1;
unsigned block_size_z = 1;
dim3 dimGrid = getGridDimensions(N, 1, 1, block_size_x, block_size_y, block_size_z);
dim3 dimBlock( block_size_x, block_size_y, block_size_z );
unsigned block_size2_x = 128;
unsigned block_size2_y = 1;
unsigned block_size2_z = 1;
const unsigned unroll2 = 2;
dim3 dimGrid2 = getGridDimensions(N/unroll2, 1, 1, block_size2_x, block_size2_y, block_size2_z);
dim3 dimBlock2( block_size2_x, block_size2_y, block_size2_z );
unsigned block_size4_x = 128;
unsigned block_size4_y = 1;
unsigned block_size4_z = 1;
const unsigned unroll4 = 4;
dim3 dimGrid4 = getGridDimensions(N/unroll4, 1, 1, block_size4_x, block_size4_y, block_size4_z);
dim3 dimBlock4( block_size4_x, block_size4_y, block_size4_z );
std::cout << "Launching saxpy CUB kernels" << std::endl;
std::cout << "Grid 1 [" << dimGrid.x << "," << dimGrid.y << "," << dimGrid.z << "]" << std::endl;
std::cout << "Grid 2 [" << dimGrid2.x << "," << dimGrid2.y << "," << dimGrid2.z << "]" << std::endl;
std::cout << "Grid 4 [" << dimGrid4.x << "," << dimGrid4.y << "," << dimGrid4.z << "]" << std::endl;
std::cout << "Block 1 [" << dimBlock.x << "," << dimBlock.y << "," << dimBlock.z << "]" << std::endl;
std::cout << "Block 2 [" << dimBlock2.x << "," << dimBlock2.y << "," << dimBlock2.z << "]" << std::endl;
std::cout << "Block 4 [" << dimBlock4.x << "," << dimBlock4.y << "," << dimBlock4.z << "]" << std::endl;
for (int i = 0; i < repetitions; i++) {
saxpy_gpu_cub<T, 128, 1><<<dimGrid, dimBlock>>>(px, py, pz, N, alpha);
err = cudaGetLastError();
saxpy_gpu_cub<T, 128, unroll2><<<dimGrid2, dimBlock2>>>(px, py, pz, N, alpha);
err = cudaGetLastError();
saxpy_gpu_cub<T, 128, unroll4><<<dimGrid4, dimBlock4>>>(px, py, pz, N, alpha);
err = cudaGetLastError();
}
std::cout << std::endl;
err = cudaDeviceSynchronize();
}
template <typename T>
void saxpy_c(cuda::device& gpu, unsigned N, unsigned repetitions) {
try {
cuda::error err;
cout << "--- SAXPY C ---" << endl;
size_t free, total;
gpu.getMemInfo(free, total);
size_t allocated_by_os = total - free;
size_t est_program_alloc = 3 * N * sizeof(T);
T *px, *py, *pz;
err = cudaMalloc((void**)&px, N * sizeof(T));
err = cudaMalloc((void**)&py, N * sizeof(T));
err = cudaMalloc((void**)&pz, N * sizeof(T));
T alpha = 0.8;
gpu.getMemInfo(free, total);
cout << "Free mem: " << free/(1024*1024) << " / " << total/(1024*1024) << " MB" << endl;
size_t real_program_alloc = total - free - allocated_by_os;
float factor = (real_program_alloc - est_program_alloc)/ static_cast<float>(real_program_alloc);
cout << "Mem allocated by os: " << allocated_by_os/(1024*1024) << " MB" << endl;
cout << "Mem allocated by program[Est]: " << est_program_alloc/(1024*1024) << " MB" << endl;
cout << "Mem allocated by program[Real]: " << real_program_alloc/(1024*1024) << " MB" << endl;
cout << "Difference " << factor << endl;
run_saxpy_c(px, py, pz, N, alpha, repetitions);
err = cudaFree(px);
err = cudaFree(py);
err = cudaFree(pz);
} catch(const cuda::cuda_exception& error) {
std::cout << error.what() << std::endl;
}
}
template <typename T>
void saxpy_cpp(cuda::device& gpu, unsigned N, unsigned repetitions) {
try {
cuda::error err;
cout << "--- SAXPY Cpp ---" << endl;
size_t free, total, allocated_by_os;
gpu.getMemInfo(free, total);
allocated_by_os = total - free;
cuda::vector<T> *a = new cuda::vector<T>(N);
cuda::vector<T> *b = new cuda::vector<T>(N);
cuda::vector<T> *c = new cuda::vector<T>(N);
T alpha = 0.8;
gpu.getMemInfo(free, total);
cout << "Free mem: " << free/(1024*1024) << " / " << total/(1024*1024) << " MB" << endl;
size_t est_program_alloc = 3 * N * sizeof(T);
size_t real_program_alloc = total - free - allocated_by_os;
float factor = (real_program_alloc - est_program_alloc)/ static_cast<float>(real_program_alloc);
cout << "Mem allocated by os: " << allocated_by_os/(1024*1024) << " MB" << endl;
cout << "Mem allocated by program[Real]: " << real_program_alloc/(1024*1024) << " MB" << endl;
cout << "Mem allocated by program[Est]: " << est_program_alloc/(1024*1024) << " MB" << endl;
cout << "Difference " << factor << endl;
run_saxpy_cpp(*a, *b, *c, N, alpha, repetitions);
delete(a);
delete(b);
delete(c);
} catch(const cuda::cuda_exception& error) {
std::cout << error.what() << std::endl;
}
}
template <typename T>
void saxpy_cublas(cuda::device& gpu, unsigned N, unsigned repetitions) {
try {
cuda::error err;
cout << "--- SAXPY Cublas ---" << endl;
size_t free, total, allocated_by_os;
gpu.getMemInfo(free, total);
allocated_by_os = total - free;
T *px, *py;
err = cudaMalloc((void**)&px, N * sizeof(T));
err = cudaMalloc((void**)&py, N * sizeof(T));
const T alpha = 0.8;
gpu.getMemInfo(free, total);
cout << "Free mem: " << free/(1024*1024) << " / " << total/(1024*1024) << " MB" << endl;
size_t est_program_alloc = 2 * N * sizeof(T);
size_t real_program_alloc = total - free - allocated_by_os;
float factor = (real_program_alloc - est_program_alloc)/ static_cast<float>(real_program_alloc);
cout << "Mem allocated by os: " << allocated_by_os/(1024*1024) << " MB" << endl;
cout << "Mem allocated by program[Real]: " << real_program_alloc/(1024*1024) << " MB" << endl;
cout << "Mem allocated by program[Est]: " << est_program_alloc/(1024*1024) << " MB" << endl;
cout << "Difference " << factor << endl;
run_saxpy_cublas(px, py, N, alpha, repetitions);
err = cudaFree(px);
err = cudaFree(py);
} catch(const cuda::cuda_exception& error) {
std::cout << error.what() << std::endl;
}
}
template <typename T>
void saxpy_cub(cuda::device& gpu, unsigned N, unsigned repetitions) {
try {
cuda::error err;
cout << "--- SAXPY CUB ---" << endl;
size_t free, total, allocated_by_os;
gpu.getMemInfo(free, total);
allocated_by_os = total - free;
T *px, *py, *pz;
err = cudaMalloc((void**)&px, N * sizeof(T));
err = cudaMalloc((void**)&py, N * sizeof(T));
err = cudaMalloc((void**)&pz, N * sizeof(T));
T alpha = 0.8;
gpu.getMemInfo(free, total);
cout << "Free mem: " << free/(1024*1024) << " / " << total/(1024*1024) << " MB" << endl;
size_t est_program_alloc = 3 * N * sizeof(T);
size_t real_program_alloc = total - free - allocated_by_os;
float factor = (real_program_alloc - est_program_alloc)/ static_cast<float>(real_program_alloc);
cout << "Mem allocated by os: " << allocated_by_os/(1024*1024) << " MB" << endl;
cout << "Mem allocated by program[Real]: " << real_program_alloc/(1024*1024) << " MB" << endl;
cout << "Mem allocated by program[Est]: " << est_program_alloc/(1024*1024) << " MB" << endl;
cout << "Difference " << factor << endl;
run_saxpy_cub(px, py, pz, N, alpha, repetitions);
err = cudaFree(px);
err = cudaFree(py);
err = cudaFree(pz);
} catch(const cuda::cuda_exception& error) {
std::cout << error.what() << std::endl;
}
}
void launch_saxpy(cuda::device& gpu, unsigned N, unsigned repetitions)
{
saxpy_c<float>(gpu, N, repetitions);
//saxpy_cpp<float>(gpu, N, repetitions);
saxpy_cublas<float>(gpu, N, repetitions);
saxpy_cub<float>(gpu, N, repetitions);
saxpy_c<double>(gpu, N, repetitions);
//saxpy_cpp<double>(gpu, N, repetitions);
saxpy_cublas<double>(gpu, N, repetitions);
saxpy_cub<double>(gpu, N, repetitions);
}
|
53baa686d85029bb4b9b4923128585dea332e70e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "hip/hip_runtime.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
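/* Integer ceiling division: e.g. ceil(512, 32) == 16. (Being a function-like
macro, this also intercepts any two-argument use of the name ceil here.) */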
void check_error (const char* message) {
hipError_t error = hipGetLastError ();
if (error != hipSuccess) {
printf ("CUDA error : %s, %s\n", message, hipGetErrorString (error));
exit(-1);
}
}
__global__ void j3d27pt (double * __restrict__ t_in, double * __restrict__ t_out, int N) {
// Determine the block's indices
int i0 = (int)(blockIdx.x)*(int)(blockDim.x) + 1;
int i = max(i0,1) + (int)(threadIdx.x);
int j0 = 4*(int)(blockIdx.y)*(int)(blockDim.y) + 1;
int j = max(j0,1) + 4*(int)(threadIdx.y);
int k0 = (int)(blockIdx.z)*(int)(blockDim.z) + 1;
int k = max(k0,1) + (int)(threadIdx.z);
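/* NOTE: the casts below hardcode a 514x514 plane pitch, so this kernel is
only correct for N == 514 (512 interior points plus a one-point halo on
each side). */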
double (*in)[514][514] = (double (*)[514][514])t_in;
double (*out)[514][514] = (double (*)[514][514])t_out;
if (i <= N-2 && j <= N-2 && k <= N-2) {
double outkc0jc0ic0 = 1.14 * in[k][j-1][i];
outkc0jc0ic0 += 0.75 * in[k-1][j-1][i-1];
outkc0jc0ic0 += 0.75 * in[k-1][j-1][i+1];
outkc0jc0ic0 += 0.75 * in[k+1][j-1][i-1];
outkc0jc0ic0 += 0.75 * in[k+1][j-1][i+1];
outkc0jc0ic0 += 1.031 * in[k-1][j-1][i];
outkc0jc0ic0 += 1.031 * in[k][j-1][i-1];
outkc0jc0ic0 += 1.031 * in[k][j-1][i+1];
outkc0jc0ic0 += 1.031 * in[k+1][j-1][i];
outkc0jc0ic0 += 0.125 * in[k][j][i];
double outkc0jp1ic0 = 1.14 * in[k][j][i];
outkc0jc0ic0 += 1.14 * in[k-1][j][i];
outkc0jp1ic0 += 1.031 * in[k-1][j][i];
outkc0jc0ic0 += 1.14 * in[k+1][j][i];
outkc0jp1ic0 += 1.031 * in[k+1][j][i];
outkc0jc0ic0 += 1.14 * in[k][j+1][i];
outkc0jp1ic0 += 0.125 * in[k][j+1][i];
double outkc0jp2ic0 = 1.14 * in[k][j+1][i];
outkc0jc0ic0 += 1.14 * in[k][j][i-1];
outkc0jp1ic0 += 1.031 * in[k][j][i-1];
outkc0jc0ic0 += 1.14 * in[k][j][i+1];
outkc0jp1ic0 += 1.031 * in[k][j][i+1];
outkc0jc0ic0 += 0.75 * in[k-1][j+1][i-1];
outkc0jp1ic0 += 1.031 * in[k-1][j+1][i-1];
outkc0jp2ic0 += 0.75 * in[k-1][j+1][i-1];
outkc0jc0ic0 += 0.75 * in[k-1][j+1][i+1];
outkc0jp1ic0 += 1.031 * in[k-1][j+1][i+1];
outkc0jp2ic0 += 0.75 * in[k-1][j+1][i+1];
outkc0jc0ic0 += 0.75 * in[k+1][j+1][i-1];
outkc0jp1ic0 += 1.031 * in[k+1][j+1][i-1];
outkc0jp2ic0 += 0.75 * in[k+1][j+1][i-1];
outkc0jc0ic0 += 0.75 * in[k+1][j+1][i+1];
outkc0jp1ic0 += 1.031 * in[k+1][j+1][i+1];
outkc0jp2ic0 += 0.75 * in[k+1][j+1][i+1];
outkc0jc0ic0 += 1.031 * in[k-1][j][i-1];
outkc0jp1ic0 += 0.75 * in[k-1][j][i-1];
outkc0jc0ic0 += 1.031 * in[k-1][j][i+1];
outkc0jp1ic0 += 0.75 * in[k-1][j][i+1];
outkc0jc0ic0 += 1.031 * in[k-1][j+1][i];
outkc0jp1ic0 += 1.14 * in[k-1][j+1][i];
outkc0jp2ic0 += 1.031 * in[k-1][j+1][i];
outkc0jc0ic0 += 1.031 * in[k][j+1][i-1];
outkc0jp1ic0 += 1.14 * in[k][j+1][i-1];
outkc0jp2ic0 += 1.031 * in[k][j+1][i-1];
outkc0jc0ic0 += 1.031 * in[k][j+1][i+1];
outkc0jp1ic0 += 1.14 * in[k][j+1][i+1];
outkc0jp2ic0 += 1.031 * in[k][j+1][i+1];
outkc0jc0ic0 += 1.031 * in[k+1][j][i-1];
outkc0jp1ic0 += 0.75 * in[k+1][j][i-1];
outkc0jc0ic0 += 1.031 * in[k+1][j][i+1];
outkc0jp1ic0 += 0.75 * in[k+1][j][i+1];
outkc0jc0ic0 += 1.031 * in[k+1][j+1][i];
outkc0jp1ic0 += 1.14 * in[k+1][j+1][i];
outkc0jp2ic0 += 1.031 * in[k+1][j+1][i];
outkc0jp1ic0 += 1.14 * in[k][j+2][i];
outkc0jp2ic0 += 0.125 * in[k][j+2][i];
double outkc0jp3ic0 = 1.14 * in[k][j+2][i];
outkc0jp1ic0 += 0.75 * in[k-1][j+2][i-1];
outkc0jp2ic0 += 1.031 * in[k-1][j+2][i-1];
outkc0jp3ic0 += 0.75 * in[k-1][j+2][i-1];
outkc0jp1ic0 += 0.75 * in[k-1][j+2][i+1];
outkc0jp2ic0 += 1.031 * in[k-1][j+2][i+1];
outkc0jp3ic0 += 0.75 * in[k-1][j+2][i+1];
outkc0jp1ic0 += 0.75 * in[k+1][j+2][i-1];
outkc0jp2ic0 += 1.031 * in[k+1][j+2][i-1];
outkc0jp3ic0 += 0.75 * in[k+1][j+2][i-1];
outkc0jp1ic0 += 0.75 * in[k+1][j+2][i+1];
outkc0jp2ic0 += 1.031 * in[k+1][j+2][i+1];
outkc0jp3ic0 += 0.75 * in[k+1][j+2][i+1];
outkc0jp1ic0 += 1.031 * in[k-1][j+2][i];
outkc0jp2ic0 += 1.14 * in[k-1][j+2][i];
outkc0jp3ic0 += 1.031 * in[k-1][j+2][i];
outkc0jp1ic0 += 1.031 * in[k][j+2][i-1];
outkc0jp2ic0 += 1.14 * in[k][j+2][i-1];
outkc0jp3ic0 += 1.031 * in[k][j+2][i-1];
outkc0jp1ic0 += 1.031 * in[k][j+2][i+1];
outkc0jp2ic0 += 1.14 * in[k][j+2][i+1];
outkc0jp3ic0 += 1.031 * in[k][j+2][i+1];
outkc0jp1ic0 += 1.031 * in[k+1][j+2][i];
outkc0jp2ic0 += 1.14 * in[k+1][j+2][i];
outkc0jp3ic0 += 1.031 * in[k+1][j+2][i];
outkc0jp2ic0 += 1.14 * in[k][j+3][i];
outkc0jp3ic0 += 0.125 * in[k][j+3][i];
outkc0jp2ic0 += 0.75 * in[k-1][j+3][i-1];
outkc0jp3ic0 += 1.031 * in[k-1][j+3][i-1];
outkc0jp2ic0 += 0.75 * in[k-1][j+3][i+1];
outkc0jp3ic0 += 1.031 * in[k-1][j+3][i+1];
outkc0jp2ic0 += 0.75 * in[k+1][j+3][i-1];
outkc0jp3ic0 += 1.031 * in[k+1][j+3][i-1];
outkc0jp2ic0 += 0.75 * in[k+1][j+3][i+1];
outkc0jp3ic0 += 1.031 * in[k+1][j+3][i+1];
outkc0jp2ic0 += 1.031 * in[k-1][j+3][i];
outkc0jp3ic0 += 1.14 * in[k-1][j+3][i];
outkc0jp2ic0 += 1.031 * in[k][j+3][i-1];
outkc0jp3ic0 += 1.14 * in[k][j+3][i-1];
outkc0jp2ic0 += 1.031 * in[k][j+3][i+1];
outkc0jp3ic0 += 1.14 * in[k][j+3][i+1];
outkc0jp2ic0 += 1.031 * in[k+1][j+3][i];
outkc0jp3ic0 += 1.14 * in[k+1][j+3][i];
outkc0jp3ic0 += 1.14 * in[k][j+4][i];
outkc0jp3ic0 += 0.75 * in[k-1][j+4][i-1];
outkc0jp3ic0 += 0.75 * in[k-1][j+4][i+1];
outkc0jp3ic0 += 0.75 * in[k+1][j+4][i-1];
outkc0jp3ic0 += 0.75 * in[k+1][j+4][i+1];
outkc0jp3ic0 += 1.031 * in[k-1][j+4][i];
outkc0jp3ic0 += 1.031 * in[k][j+4][i-1];
outkc0jp3ic0 += 1.031 * in[k][j+4][i+1];
outkc0jp3ic0 += 1.031 * in[k+1][j+4][i];
out[k][j][i] = outkc0jc0ic0;
out[k][j+1][i] = outkc0jp1ic0;
out[k][j+2][i] = outkc0jp2ic0;
out[k][j+3][i] = outkc0jp3ic0;
}
}
extern "C" void host_code (double *h_in, double *h_out, int N) {
double *in;
hipMalloc (&in, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for in\n");
hipMemcpy (in, h_in, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *out;
hipMalloc (&out, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for out\n");
dim3 blockconfig (32,4,4);
dim3 gridconfig (ceil(N-2, blockconfig.x), ceil(N-2, 4*blockconfig.y), ceil(N-2, blockconfig.z));
hipLaunchKernelGGL(( j3d27pt), dim3(gridconfig), dim3(blockconfig), 0, 0, in, out, N);
hipMemcpy (h_out, out, sizeof(double)*N*N*N, hipMemcpyDeviceToHost);
hipFree (in);
hipFree (out);
}
|
53baa686d85029bb4b9b4923128585dea332e70e.cu
|
#include <stdio.h>
#include "cuda.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
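/* Integer ceiling division: e.g. ceil(512, 32) == 16. (Being a function-like
macro, this also intercepts any two-argument use of the name ceil here.) */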
void check_error (const char* message) {
cudaError_t error = cudaGetLastError ();
if (error != cudaSuccess) {
printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (error));
exit(-1);
}
}
__global__ void j3d27pt (double * __restrict__ t_in, double * __restrict__ t_out, int N) {
// Determine the block's indices
int i0 = (int)(blockIdx.x)*(int)(blockDim.x) + 1;
int i = max(i0,1) + (int)(threadIdx.x);
int j0 = 4*(int)(blockIdx.y)*(int)(blockDim.y) + 1;
int j = max(j0,1) + 4*(int)(threadIdx.y);
int k0 = (int)(blockIdx.z)*(int)(blockDim.z) + 1;
int k = max(k0,1) + (int)(threadIdx.z);
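/* NOTE: the casts below hardcode a 514x514 plane pitch, so this kernel is
only correct for N == 514 (512 interior points plus a one-point halo on
each side). */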
double (*in)[514][514] = (double (*)[514][514])t_in;
double (*out)[514][514] = (double (*)[514][514])t_out;
if (i <= N-2 && j <= N-2 && k <= N-2) {
double outkc0jc0ic0 = 1.14 * in[k][j-1][i];
outkc0jc0ic0 += 0.75 * in[k-1][j-1][i-1];
outkc0jc0ic0 += 0.75 * in[k-1][j-1][i+1];
outkc0jc0ic0 += 0.75 * in[k+1][j-1][i-1];
outkc0jc0ic0 += 0.75 * in[k+1][j-1][i+1];
outkc0jc0ic0 += 1.031 * in[k-1][j-1][i];
outkc0jc0ic0 += 1.031 * in[k][j-1][i-1];
outkc0jc0ic0 += 1.031 * in[k][j-1][i+1];
outkc0jc0ic0 += 1.031 * in[k+1][j-1][i];
outkc0jc0ic0 += 0.125 * in[k][j][i];
double outkc0jp1ic0 = 1.14 * in[k][j][i];
outkc0jc0ic0 += 1.14 * in[k-1][j][i];
outkc0jp1ic0 += 1.031 * in[k-1][j][i];
outkc0jc0ic0 += 1.14 * in[k+1][j][i];
outkc0jp1ic0 += 1.031 * in[k+1][j][i];
outkc0jc0ic0 += 1.14 * in[k][j+1][i];
outkc0jp1ic0 += 0.125 * in[k][j+1][i];
double outkc0jp2ic0 = 1.14 * in[k][j+1][i];
outkc0jc0ic0 += 1.14 * in[k][j][i-1];
outkc0jp1ic0 += 1.031 * in[k][j][i-1];
outkc0jc0ic0 += 1.14 * in[k][j][i+1];
outkc0jp1ic0 += 1.031 * in[k][j][i+1];
outkc0jc0ic0 += 0.75 * in[k-1][j+1][i-1];
outkc0jp1ic0 += 1.031 * in[k-1][j+1][i-1];
outkc0jp2ic0 += 0.75 * in[k-1][j+1][i-1];
outkc0jc0ic0 += 0.75 * in[k-1][j+1][i+1];
outkc0jp1ic0 += 1.031 * in[k-1][j+1][i+1];
outkc0jp2ic0 += 0.75 * in[k-1][j+1][i+1];
outkc0jc0ic0 += 0.75 * in[k+1][j+1][i-1];
outkc0jp1ic0 += 1.031 * in[k+1][j+1][i-1];
outkc0jp2ic0 += 0.75 * in[k+1][j+1][i-1];
outkc0jc0ic0 += 0.75 * in[k+1][j+1][i+1];
outkc0jp1ic0 += 1.031 * in[k+1][j+1][i+1];
outkc0jp2ic0 += 0.75 * in[k+1][j+1][i+1];
outkc0jc0ic0 += 1.031 * in[k-1][j][i-1];
outkc0jp1ic0 += 0.75 * in[k-1][j][i-1];
outkc0jc0ic0 += 1.031 * in[k-1][j][i+1];
outkc0jp1ic0 += 0.75 * in[k-1][j][i+1];
outkc0jc0ic0 += 1.031 * in[k-1][j+1][i];
outkc0jp1ic0 += 1.14 * in[k-1][j+1][i];
outkc0jp2ic0 += 1.031 * in[k-1][j+1][i];
outkc0jc0ic0 += 1.031 * in[k][j+1][i-1];
outkc0jp1ic0 += 1.14 * in[k][j+1][i-1];
outkc0jp2ic0 += 1.031 * in[k][j+1][i-1];
outkc0jc0ic0 += 1.031 * in[k][j+1][i+1];
outkc0jp1ic0 += 1.14 * in[k][j+1][i+1];
outkc0jp2ic0 += 1.031 * in[k][j+1][i+1];
outkc0jc0ic0 += 1.031 * in[k+1][j][i-1];
outkc0jp1ic0 += 0.75 * in[k+1][j][i-1];
outkc0jc0ic0 += 1.031 * in[k+1][j][i+1];
outkc0jp1ic0 += 0.75 * in[k+1][j][i+1];
outkc0jc0ic0 += 1.031 * in[k+1][j+1][i];
outkc0jp1ic0 += 1.14 * in[k+1][j+1][i];
outkc0jp2ic0 += 1.031 * in[k+1][j+1][i];
outkc0jp1ic0 += 1.14 * in[k][j+2][i];
outkc0jp2ic0 += 0.125 * in[k][j+2][i];
double outkc0jp3ic0 = 1.14 * in[k][j+2][i];
outkc0jp1ic0 += 0.75 * in[k-1][j+2][i-1];
outkc0jp2ic0 += 1.031 * in[k-1][j+2][i-1];
outkc0jp3ic0 += 0.75 * in[k-1][j+2][i-1];
outkc0jp1ic0 += 0.75 * in[k-1][j+2][i+1];
outkc0jp2ic0 += 1.031 * in[k-1][j+2][i+1];
outkc0jp3ic0 += 0.75 * in[k-1][j+2][i+1];
outkc0jp1ic0 += 0.75 * in[k+1][j+2][i-1];
outkc0jp2ic0 += 1.031 * in[k+1][j+2][i-1];
outkc0jp3ic0 += 0.75 * in[k+1][j+2][i-1];
outkc0jp1ic0 += 0.75 * in[k+1][j+2][i+1];
outkc0jp2ic0 += 1.031 * in[k+1][j+2][i+1];
outkc0jp3ic0 += 0.75 * in[k+1][j+2][i+1];
outkc0jp1ic0 += 1.031 * in[k-1][j+2][i];
outkc0jp2ic0 += 1.14 * in[k-1][j+2][i];
outkc0jp3ic0 += 1.031 * in[k-1][j+2][i];
outkc0jp1ic0 += 1.031 * in[k][j+2][i-1];
outkc0jp2ic0 += 1.14 * in[k][j+2][i-1];
outkc0jp3ic0 += 1.031 * in[k][j+2][i-1];
outkc0jp1ic0 += 1.031 * in[k][j+2][i+1];
outkc0jp2ic0 += 1.14 * in[k][j+2][i+1];
outkc0jp3ic0 += 1.031 * in[k][j+2][i+1];
outkc0jp1ic0 += 1.031 * in[k+1][j+2][i];
outkc0jp2ic0 += 1.14 * in[k+1][j+2][i];
outkc0jp3ic0 += 1.031 * in[k+1][j+2][i];
outkc0jp2ic0 += 1.14 * in[k][j+3][i];
outkc0jp3ic0 += 0.125 * in[k][j+3][i];
outkc0jp2ic0 += 0.75 * in[k-1][j+3][i-1];
outkc0jp3ic0 += 1.031 * in[k-1][j+3][i-1];
outkc0jp2ic0 += 0.75 * in[k-1][j+3][i+1];
outkc0jp3ic0 += 1.031 * in[k-1][j+3][i+1];
outkc0jp2ic0 += 0.75 * in[k+1][j+3][i-1];
outkc0jp3ic0 += 1.031 * in[k+1][j+3][i-1];
outkc0jp2ic0 += 0.75 * in[k+1][j+3][i+1];
outkc0jp3ic0 += 1.031 * in[k+1][j+3][i+1];
outkc0jp2ic0 += 1.031 * in[k-1][j+3][i];
outkc0jp3ic0 += 1.14 * in[k-1][j+3][i];
outkc0jp2ic0 += 1.031 * in[k][j+3][i-1];
outkc0jp3ic0 += 1.14 * in[k][j+3][i-1];
outkc0jp2ic0 += 1.031 * in[k][j+3][i+1];
outkc0jp3ic0 += 1.14 * in[k][j+3][i+1];
outkc0jp2ic0 += 1.031 * in[k+1][j+3][i];
outkc0jp3ic0 += 1.14 * in[k+1][j+3][i];
outkc0jp3ic0 += 1.14 * in[k][j+4][i];
outkc0jp3ic0 += 0.75 * in[k-1][j+4][i-1];
outkc0jp3ic0 += 0.75 * in[k-1][j+4][i+1];
outkc0jp3ic0 += 0.75 * in[k+1][j+4][i-1];
outkc0jp3ic0 += 0.75 * in[k+1][j+4][i+1];
outkc0jp3ic0 += 1.031 * in[k-1][j+4][i];
outkc0jp3ic0 += 1.031 * in[k][j+4][i-1];
outkc0jp3ic0 += 1.031 * in[k][j+4][i+1];
outkc0jp3ic0 += 1.031 * in[k+1][j+4][i];
out[k][j][i] = outkc0jc0ic0;
out[k][j+1][i] = outkc0jp1ic0;
out[k][j+2][i] = outkc0jp2ic0;
out[k][j+3][i] = outkc0jp3ic0;
}
}
extern "C" void host_code (double *h_in, double *h_out, int N) {
double *in;
cudaMalloc (&in, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for in\n");
cudaMemcpy (in, h_in, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *out;
cudaMalloc (&out, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for out\n");
dim3 blockconfig (32,4,4);
dim3 gridconfig (ceil(N-2, blockconfig.x), ceil(N-2, 4*blockconfig.y), ceil(N-2, blockconfig.z));
j3d27pt<<<gridconfig, blockconfig>>> (in, out, N);
cudaMemcpy (h_out, out, sizeof(double)*N*N*N, cudaMemcpyDeviceToHost);
cudaFree (in);
cudaFree (out);
}
|
b088d99457be5d7049480eb59ccd479d6f351224.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <cuda_utils.cuh>
#include <distance/fused_l2_nn.cuh>
#include <linalg/norm.cuh>
#include <random/rng.cuh>
#include "test_utils.h"
namespace MLCommon {
namespace Distance {
template <typename DataT, bool Sqrt, typename ReduceOpT, int NWARPS>
__global__ void naiveKernel(hipcub::KeyValuePair<int, DataT> *min, DataT *x,
DataT *y, int m, int n, int k, int *workspace,
DataT maxVal) {
int midx = threadIdx.y + blockIdx.y * blockDim.y;
int nidx = threadIdx.x + blockIdx.x * blockDim.x;
DataT acc = DataT(0);
for (int i = 0; i < k; ++i) {
int xidx = i + midx * k;
int yidx = i + nidx * k;
auto diff = midx >= m || nidx >= n ? DataT(0) : x[xidx] - y[yidx];
acc += diff * diff;
}
if (Sqrt) {
acc = mySqrt(acc);
}
ReduceOpT redOp;
typedef hipcub::WarpReduce<hipcub::KeyValuePair<int, DataT>> WarpReduce;
__shared__ typename WarpReduce::TempStorage temp[NWARPS];
int warpId = (threadIdx.y * blockDim.x + threadIdx.x) / WarpSize; // warp id from the linearized 2-D thread index
hipcub::KeyValuePair<int, DataT> tmp;
tmp.key = nidx;
tmp.value = midx >= m || nidx >= n ? maxVal : acc;
tmp = WarpReduce(temp[warpId]).Reduce(tmp, KVPMinReduce<int, DataT>());
if (threadIdx.x % WarpSize == 0 && midx < m) {
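// Per-row spinlock: workspace[midx] is 0 when free, 1 when held. The
// __threadfence() calls order the reduction's write with the lock release
// so other blocks observe a consistent min[midx].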
while (atomicCAS(workspace + midx, 0, 1) == 1)
;
__threadfence();
redOp(min + midx, tmp);
__threadfence();
atomicCAS(workspace + midx, 1, 0);
}
}
template <typename DataT, bool Sqrt>
void naive(hipcub::KeyValuePair<int, DataT> *min, DataT *x, DataT *y, int m, int n,
int k, int *workspace, hipStream_t stream) {
static const dim3 TPB(32, 16, 1);
dim3 nblks(ceildiv(n, (int)TPB.x), ceildiv(m, (int)TPB.y), 1);
CUDA_CHECK(hipMemsetAsync(workspace, 0, sizeof(int) * m, stream));
auto blks = ceildiv(m, 256);
MinAndDistanceReduceOp<int, DataT> op;
hipLaunchKernelGGL(( initKernel<DataT, hipcub::KeyValuePair<int, DataT>, int>)
, dim3(blks), dim3(256), 0, stream, min, m, std::numeric_limits<DataT>::max(), op);
CUDA_CHECK(hipGetLastError());
hipLaunchKernelGGL(( naiveKernel<DataT, Sqrt, MinAndDistanceReduceOp<int, DataT>, 16>)
, dim3(nblks), dim3(TPB), 0, stream, min, x, y, m, n, k, workspace,
std::numeric_limits<DataT>::max());
CUDA_CHECK(hipGetLastError());
}
template <typename DataT>
struct Inputs {
DataT tolerance;
int m, n, k;
unsigned long long int seed;
};
template <typename DataT, bool Sqrt>
class FusedL2NNTest : public ::testing::TestWithParam<Inputs<DataT>> {
public:
void SetUp() override {
params = ::testing::TestWithParam<Inputs<DataT>>::GetParam();
Random::Rng r(params.seed);
int m = params.m;
int n = params.n;
int k = params.k;
CUDA_CHECK(hipStreamCreate(&stream));
allocate(x, m * k);
allocate(y, n * k);
allocate(xn, m);
allocate(yn, n);
allocate(workspace, sizeof(int) * m);
allocate(min, m);
allocate(min_ref, m);
r.uniform(x, m * k, DataT(-1.0), DataT(1.0), stream);
r.uniform(y, n * k, DataT(-1.0), DataT(1.0), stream);
generateGoldenResult();
LinAlg::rowNorm(xn, x, k, m, LinAlg::L2Norm, true, stream);
LinAlg::rowNorm(yn, y, k, n, LinAlg::L2Norm, true, stream);
}
void TearDown() override {
CUDA_CHECK(hipStreamSynchronize(stream));
CUDA_CHECK(hipStreamDestroy(stream));
CUDA_CHECK(hipFree(x));
CUDA_CHECK(hipFree(y));
CUDA_CHECK(hipFree(xn));
CUDA_CHECK(hipFree(yn));
CUDA_CHECK(hipFree(workspace));
CUDA_CHECK(hipFree(min_ref));
CUDA_CHECK(hipFree(min));
}
protected:
Inputs<DataT> params;
DataT *x, *y, *xn, *yn;
char *workspace;
hipcub::KeyValuePair<int, DataT> *min, *min_ref;
hipStream_t stream;
virtual void generateGoldenResult() {
int m = params.m;
int n = params.n;
int k = params.k;
naive<DataT, Sqrt>(min_ref, x, y, m, n, k, (int *)workspace, stream);
}
void runTest(hipcub::KeyValuePair<int, DataT> *out) {
int m = params.m;
int n = params.n;
int k = params.k;
MinAndDistanceReduceOp<int, DataT> redOp;
fusedL2NN<DataT, hipcub::KeyValuePair<int, DataT>, int>(
out, x, y, xn, yn, m, n, k, (void *)workspace, redOp, Sqrt, true, stream);
CUDA_CHECK(hipStreamSynchronize(stream));
}
};
template <typename T>
struct CompareApproxAbsKVP {
typedef typename hipcub::KeyValuePair<int, T> KVP;
CompareApproxAbsKVP(T eps_) : eps(eps_) {}
bool operator()(const KVP &a, const KVP &b) const {
if (a.key != b.key) return false;
T diff = abs(abs(a.value) - abs(b.value));
T m = ::max(abs(a.value), abs(b.value));
T ratio = m >= eps ? diff / m : diff;
return (ratio <= eps);
}
private:
T eps;
};
template <typename T>
struct CompareExactKVP {
typedef typename hipcub::KeyValuePair<int, T> KVP;
bool operator()(const KVP &a, const KVP &b) const {
if (a.key != b.key) return false;
if (a.value != b.value) return false;
return true;
}
};
template <typename K, typename V, typename L>
::testing::AssertionResult devArrMatch(const hipcub::KeyValuePair<K, V> *expected,
const hipcub::KeyValuePair<K, V> *actual,
size_t size, L eq_compare,
hipStream_t stream = 0) {
typedef typename hipcub::KeyValuePair<K, V> KVP;
std::shared_ptr<KVP> exp_h(new KVP[size]);
std::shared_ptr<KVP> act_h(new KVP[size]);
updateHost<KVP>(exp_h.get(), expected, size, stream);
updateHost<KVP>(act_h.get(), actual, size, stream);
CUDA_CHECK(hipStreamSynchronize(stream));
for (size_t i(0); i < size; ++i) {
auto exp = exp_h.get()[i];
auto act = act_h.get()[i];
if (!eq_compare(exp, act)) {
return ::testing::AssertionFailure()
<< "actual=" << act.key << "," << act.value
<< " != expected=" << exp.key << "," << exp.value << " @" << i;
}
}
return ::testing::AssertionSuccess();
}
const std::vector<Inputs<float>> inputsf = {
{0.001f, 32, 32, 32, 1234ULL}, {0.001f, 32, 64, 32, 1234ULL},
{0.001f, 64, 32, 32, 1234ULL}, {0.001f, 64, 64, 32, 1234ULL},
{0.001f, 128, 32, 32, 1234ULL}, {0.001f, 128, 64, 32, 1234ULL},
{0.001f, 128, 128, 64, 1234ULL}, {0.001f, 64, 128, 128, 1234ULL},
{0.001f, 32, 32, 34, 1234ULL}, {0.001f, 32, 64, 34, 1234ULL},
{0.001f, 64, 32, 34, 1234ULL}, {0.001f, 64, 64, 34, 1234ULL},
{0.001f, 128, 32, 34, 1234ULL}, {0.001f, 128, 64, 34, 1234ULL},
{0.001f, 128, 128, 66, 1234ULL}, {0.001f, 64, 128, 130, 1234ULL},
{0.001f, 32, 32, 33, 1234ULL}, {0.001f, 32, 64, 33, 1234ULL},
{0.001f, 64, 32, 33, 1234ULL}, {0.001f, 64, 64, 33, 1234ULL},
{0.001f, 128, 32, 33, 1234ULL}, {0.001f, 128, 64, 33, 1234ULL},
{0.001f, 128, 128, 65, 1234ULL}, {0.001f, 64, 128, 129, 1234ULL},
{0.006f, 1805, 134, 2, 1234ULL},
};
typedef FusedL2NNTest<float, false> FusedL2NNTestF_Sq;
TEST_P(FusedL2NNTestF_Sq, Result) {
runTest(min);
ASSERT_TRUE(devArrMatch(min_ref, min, params.m,
CompareApproxAbsKVP<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(FusedL2NNTests, FusedL2NNTestF_Sq,
::testing::ValuesIn(inputsf));
typedef FusedL2NNTest<float, true> FusedL2NNTestF_Sqrt;
TEST_P(FusedL2NNTestF_Sqrt, Result) {
runTest(min);
ASSERT_TRUE(devArrMatch(min_ref, min, params.m,
CompareApproxAbsKVP<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(FusedL2NNTests, FusedL2NNTestF_Sqrt,
::testing::ValuesIn(inputsf));
const std::vector<Inputs<double>> inputsd = {
{0.00001, 32, 32, 32, 1234ULL}, {0.00001, 32, 64, 32, 1234ULL},
{0.00001, 64, 32, 32, 1234ULL}, {0.00001, 64, 64, 32, 1234ULL},
{0.00001, 128, 32, 32, 1234ULL}, {0.00001, 128, 64, 32, 1234ULL},
{0.00001, 128, 128, 64, 1234ULL}, {0.00001, 64, 128, 128, 1234ULL},
{0.00001, 32, 32, 34, 1234ULL}, {0.00001, 32, 64, 34, 1234ULL},
{0.00001, 64, 32, 34, 1234ULL}, {0.00001, 64, 64, 34, 1234ULL},
{0.00001, 128, 32, 34, 1234ULL}, {0.00001, 128, 64, 34, 1234ULL},
{0.00001, 128, 128, 66, 1234ULL}, {0.00001, 64, 128, 130, 1234ULL},
{0.00001, 32, 32, 33, 1234ULL}, {0.00001, 32, 64, 33, 1234ULL},
{0.00001, 64, 32, 33, 1234ULL}, {0.00001, 64, 64, 33, 1234ULL},
{0.00001, 128, 32, 33, 1234ULL}, {0.00001, 128, 64, 33, 1234ULL},
{0.00001, 128, 128, 65, 1234ULL}, {0.00001, 64, 128, 129, 1234ULL},
{0.00001, 1805, 134, 2, 1234ULL},
};
typedef FusedL2NNTest<double, false> FusedL2NNTestD_Sq;
TEST_P(FusedL2NNTestD_Sq, Result) {
runTest(min);
ASSERT_TRUE(devArrMatch(min_ref, min, params.m,
CompareApproxAbsKVP<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(FusedL2NNTests, FusedL2NNTestD_Sq,
::testing::ValuesIn(inputsd));
typedef FusedL2NNTest<double, true> FusedL2NNTestD_Sqrt;
TEST_P(FusedL2NNTestD_Sqrt, Result) {
runTest(min);
ASSERT_TRUE(devArrMatch(min_ref, min, params.m,
CompareApproxAbsKVP<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(FusedL2NNTests, FusedL2NNTestD_Sqrt,
::testing::ValuesIn(inputsd));
/// This is to test output determinism of the prim
template <typename DataT, bool Sqrt>
class FusedL2NNDetTest : public FusedL2NNTest<DataT, Sqrt> {
void SetUp() override {
FusedL2NNTest<DataT, Sqrt>::SetUp();
int m = this->params.m;
allocate(min1, m);
}
void TearDown() override {
FusedL2NNTest<DataT, Sqrt>::TearDown();
CUDA_CHECK(hipFree(min1));
}
protected:
hipcub::KeyValuePair<int, DataT> *min1;
static const int NumRepeats = 100;
void generateGoldenResult() override {}
};
typedef FusedL2NNDetTest<float, false> FusedL2NNDetTestF_Sq;
TEST_P(FusedL2NNDetTestF_Sq, Result) {
runTest(min); // assumed to be golden
for (int i = 0; i < NumRepeats; ++i) {
runTest(min1);
ASSERT_TRUE(devArrMatch(min, min1, params.m, CompareExactKVP<float>()));
}
}
INSTANTIATE_TEST_CASE_P(FusedL2NNDetTests, FusedL2NNDetTestF_Sq,
::testing::ValuesIn(inputsf));
typedef FusedL2NNDetTest<float, true> FusedL2NNDetTestF_Sqrt;
TEST_P(FusedL2NNDetTestF_Sqrt, Result) {
runTest(min); // assumed to be golden
for (int i = 0; i < NumRepeats; ++i) {
runTest(min1);
ASSERT_TRUE(devArrMatch(min, min1, params.m, CompareExactKVP<float>()));
}
}
INSTANTIATE_TEST_CASE_P(FusedL2NNDetTests, FusedL2NNDetTestF_Sqrt,
::testing::ValuesIn(inputsf));
typedef FusedL2NNDetTest<double, false> FusedL2NNDetTestD_Sq;
TEST_P(FusedL2NNDetTestD_Sq, Result) {
runTest(min); // assumed to be golden
for (int i = 0; i < NumRepeats; ++i) {
runTest(min1);
ASSERT_TRUE(devArrMatch(min, min1, params.m, CompareExactKVP<double>()));
}
}
INSTANTIATE_TEST_CASE_P(FusedL2NNDetTests, FusedL2NNDetTestD_Sq,
::testing::ValuesIn(inputsd));
typedef FusedL2NNDetTest<double, true> FusedL2NNDetTestD_Sqrt;
TEST_P(FusedL2NNDetTestD_Sqrt, Result) {
runTest(min); // assumed to be golden
for (int i = 0; i < NumRepeats; ++i) {
runTest(min1);
ASSERT_TRUE(devArrMatch(min, min1, params.m, CompareExactKVP<double>()));
}
}
INSTANTIATE_TEST_CASE_P(FusedL2NNDetTests, FusedL2NNDetTestD_Sqrt,
::testing::ValuesIn(inputsd));
} // end namespace Distance
} // end namespace MLCommon
|
b088d99457be5d7049480eb59ccd479d6f351224.cu
|
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <cuda_utils.cuh>
#include <distance/fused_l2_nn.cuh>
#include <linalg/norm.cuh>
#include <random/rng.cuh>
#include "test_utils.h"
namespace MLCommon {
namespace Distance {
template <typename DataT, bool Sqrt, typename ReduceOpT, int NWARPS>
__global__ void naiveKernel(cub::KeyValuePair<int, DataT> *min, DataT *x,
DataT *y, int m, int n, int k, int *workspace,
DataT maxVal) {
int midx = threadIdx.y + blockIdx.y * blockDim.y;
int nidx = threadIdx.x + blockIdx.x * blockDim.x;
DataT acc = DataT(0);
for (int i = 0; i < k; ++i) {
int xidx = i + midx * k;
int yidx = i + nidx * k;
auto diff = midx >= m || nidx >= n ? DataT(0) : x[xidx] - y[yidx];
acc += diff * diff;
}
if (Sqrt) {
acc = mySqrt(acc);
}
ReduceOpT redOp;
typedef cub::WarpReduce<cub::KeyValuePair<int, DataT>> WarpReduce;
__shared__ typename WarpReduce::TempStorage temp[NWARPS];
// warp id from the linearized thread index (blockDim.x == WarpSize here, so this
// is threadIdx.y); each warp must index its own TempStorage slot
int warpId = (threadIdx.y * blockDim.x + threadIdx.x) / WarpSize;
cub::KeyValuePair<int, DataT> tmp;
tmp.key = nidx;
tmp.value = midx >= m || nidx >= n ? maxVal : acc;
tmp = WarpReduce(temp[warpId]).Reduce(tmp, KVPMinReduce<int, DataT>());
if (threadIdx.x % WarpSize == 0 && midx < m) {
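// per-row spinlock: the single active lane of this warp acquires workspace[midx]
// via atomicCAS before folding its warp-local minimum into min[midx] with redOp;
// the __threadfence() calls order the update against lock acquire/release so
// warps from other blocks observe a fully written result.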
while (atomicCAS(workspace + midx, 0, 1) == 1)
;
__threadfence();
redOp(min + midx, tmp);
__threadfence();
atomicCAS(workspace + midx, 1, 0);
}
}
template <typename DataT, bool Sqrt>
void naive(cub::KeyValuePair<int, DataT> *min, DataT *x, DataT *y, int m, int n,
int k, int *workspace, cudaStream_t stream) {
static const dim3 TPB(32, 16, 1);
dim3 nblks(ceildiv(n, (int)TPB.x), ceildiv(m, (int)TPB.y), 1);
CUDA_CHECK(cudaMemsetAsync(workspace, 0, sizeof(int) * m, stream));
auto blks = ceildiv(m, 256);
MinAndDistanceReduceOp<int, DataT> op;
initKernel<DataT, cub::KeyValuePair<int, DataT>, int>
<<<blks, 256, 0, stream>>>(min, m, std::numeric_limits<DataT>::max(), op);
CUDA_CHECK(cudaGetLastError());
naiveKernel<DataT, Sqrt, MinAndDistanceReduceOp<int, DataT>, 16>
<<<nblks, TPB, 0, stream>>>(min, x, y, m, n, k, workspace,
std::numeric_limits<DataT>::max());
CUDA_CHECK(cudaGetLastError());
}
template <typename DataT>
struct Inputs {
DataT tolerance;
int m, n, k;
unsigned long long int seed;
};
template <typename DataT, bool Sqrt>
class FusedL2NNTest : public ::testing::TestWithParam<Inputs<DataT>> {
public:
void SetUp() override {
params = ::testing::TestWithParam<Inputs<DataT>>::GetParam();
Random::Rng r(params.seed);
int m = params.m;
int n = params.n;
int k = params.k;
CUDA_CHECK(cudaStreamCreate(&stream));
allocate(x, m * k);
allocate(y, n * k);
allocate(xn, m);
allocate(yn, n);
allocate(workspace, sizeof(int) * m);
allocate(min, m);
allocate(min_ref, m);
r.uniform(x, m * k, DataT(-1.0), DataT(1.0), stream);
r.uniform(y, n * k, DataT(-1.0), DataT(1.0), stream);
generateGoldenResult();
LinAlg::rowNorm(xn, x, k, m, LinAlg::L2Norm, true, stream);
LinAlg::rowNorm(yn, y, k, n, LinAlg::L2Norm, true, stream);
}
void TearDown() override {
CUDA_CHECK(cudaStreamSynchronize(stream));
CUDA_CHECK(cudaStreamDestroy(stream));
CUDA_CHECK(cudaFree(x));
CUDA_CHECK(cudaFree(y));
CUDA_CHECK(cudaFree(xn));
CUDA_CHECK(cudaFree(yn));
CUDA_CHECK(cudaFree(workspace));
CUDA_CHECK(cudaFree(min_ref));
CUDA_CHECK(cudaFree(min));
}
protected:
Inputs<DataT> params;
DataT *x, *y, *xn, *yn;
char *workspace;
cub::KeyValuePair<int, DataT> *min, *min_ref;
cudaStream_t stream;
virtual void generateGoldenResult() {
int m = params.m;
int n = params.n;
int k = params.k;
naive<DataT, Sqrt>(min_ref, x, y, m, n, k, (int *)workspace, stream);
}
void runTest(cub::KeyValuePair<int, DataT> *out) {
int m = params.m;
int n = params.n;
int k = params.k;
MinAndDistanceReduceOp<int, DataT> redOp;
fusedL2NN<DataT, cub::KeyValuePair<int, DataT>, int>(
out, x, y, xn, yn, m, n, k, (void *)workspace, redOp, Sqrt, true, stream);
CUDA_CHECK(cudaStreamSynchronize(stream));
}
};
template <typename T>
struct CompareApproxAbsKVP {
typedef typename cub::KeyValuePair<int, T> KVP;
CompareApproxAbsKVP(T eps_) : eps(eps_) {}
bool operator()(const KVP &a, const KVP &b) const {
if (a.key != b.key) return false;
T diff = abs(abs(a.value) - abs(b.value));
T m = std::max(abs(a.value), abs(b.value));
T ratio = m >= eps ? diff / m : diff;
return (ratio <= eps);
}
private:
T eps;
};
template <typename T>
struct CompareExactKVP {
typedef typename cub::KeyValuePair<int, T> KVP;
bool operator()(const KVP &a, const KVP &b) const {
if (a.key != b.key) return false;
if (a.value != b.value) return false;
return true;
}
};
template <typename K, typename V, typename L>
::testing::AssertionResult devArrMatch(const cub::KeyValuePair<K, V> *expected,
const cub::KeyValuePair<K, V> *actual,
size_t size, L eq_compare,
cudaStream_t stream = 0) {
typedef typename cub::KeyValuePair<K, V> KVP;
std::unique_ptr<KVP[]> exp_h(new KVP[size]);
std::unique_ptr<KVP[]> act_h(new KVP[size]);
updateHost<KVP>(exp_h.get(), expected, size, stream);
updateHost<KVP>(act_h.get(), actual, size, stream);
CUDA_CHECK(cudaStreamSynchronize(stream));
for (size_t i(0); i < size; ++i) {
auto exp = exp_h.get()[i];
auto act = act_h.get()[i];
if (!eq_compare(exp, act)) {
return ::testing::AssertionFailure()
<< "actual=" << act.key << "," << act.value
<< " != expected=" << exp.key << "," << exp.value << " @" << i;
}
}
return ::testing::AssertionSuccess();
}
const std::vector<Inputs<float>> inputsf = {
{0.001f, 32, 32, 32, 1234ULL}, {0.001f, 32, 64, 32, 1234ULL},
{0.001f, 64, 32, 32, 1234ULL}, {0.001f, 64, 64, 32, 1234ULL},
{0.001f, 128, 32, 32, 1234ULL}, {0.001f, 128, 64, 32, 1234ULL},
{0.001f, 128, 128, 64, 1234ULL}, {0.001f, 64, 128, 128, 1234ULL},
{0.001f, 32, 32, 34, 1234ULL}, {0.001f, 32, 64, 34, 1234ULL},
{0.001f, 64, 32, 34, 1234ULL}, {0.001f, 64, 64, 34, 1234ULL},
{0.001f, 128, 32, 34, 1234ULL}, {0.001f, 128, 64, 34, 1234ULL},
{0.001f, 128, 128, 66, 1234ULL}, {0.001f, 64, 128, 130, 1234ULL},
{0.001f, 32, 32, 33, 1234ULL}, {0.001f, 32, 64, 33, 1234ULL},
{0.001f, 64, 32, 33, 1234ULL}, {0.001f, 64, 64, 33, 1234ULL},
{0.001f, 128, 32, 33, 1234ULL}, {0.001f, 128, 64, 33, 1234ULL},
{0.001f, 128, 128, 65, 1234ULL}, {0.001f, 64, 128, 129, 1234ULL},
{0.006f, 1805, 134, 2, 1234ULL},
};
typedef FusedL2NNTest<float, false> FusedL2NNTestF_Sq;
TEST_P(FusedL2NNTestF_Sq, Result) {
runTest(min);
ASSERT_TRUE(devArrMatch(min_ref, min, params.m,
CompareApproxAbsKVP<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(FusedL2NNTests, FusedL2NNTestF_Sq,
::testing::ValuesIn(inputsf));
typedef FusedL2NNTest<float, true> FusedL2NNTestF_Sqrt;
TEST_P(FusedL2NNTestF_Sqrt, Result) {
runTest(min);
ASSERT_TRUE(devArrMatch(min_ref, min, params.m,
CompareApproxAbsKVP<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(FusedL2NNTests, FusedL2NNTestF_Sqrt,
::testing::ValuesIn(inputsf));
const std::vector<Inputs<double>> inputsd = {
{0.00001, 32, 32, 32, 1234ULL}, {0.00001, 32, 64, 32, 1234ULL},
{0.00001, 64, 32, 32, 1234ULL}, {0.00001, 64, 64, 32, 1234ULL},
{0.00001, 128, 32, 32, 1234ULL}, {0.00001, 128, 64, 32, 1234ULL},
{0.00001, 128, 128, 64, 1234ULL}, {0.00001, 64, 128, 128, 1234ULL},
{0.00001, 32, 32, 34, 1234ULL}, {0.00001, 32, 64, 34, 1234ULL},
{0.00001, 64, 32, 34, 1234ULL}, {0.00001, 64, 64, 34, 1234ULL},
{0.00001, 128, 32, 34, 1234ULL}, {0.00001, 128, 64, 34, 1234ULL},
{0.00001, 128, 128, 66, 1234ULL}, {0.00001, 64, 128, 130, 1234ULL},
{0.00001, 32, 32, 33, 1234ULL}, {0.00001, 32, 64, 33, 1234ULL},
{0.00001, 64, 32, 33, 1234ULL}, {0.00001, 64, 64, 33, 1234ULL},
{0.00001, 128, 32, 33, 1234ULL}, {0.00001, 128, 64, 33, 1234ULL},
{0.00001, 128, 128, 65, 1234ULL}, {0.00001, 64, 128, 129, 1234ULL},
{0.00001, 1805, 134, 2, 1234ULL},
};
typedef FusedL2NNTest<double, false> FusedL2NNTestD_Sq;
TEST_P(FusedL2NNTestD_Sq, Result) {
runTest(min);
ASSERT_TRUE(devArrMatch(min_ref, min, params.m,
CompareApproxAbsKVP<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(FusedL2NNTests, FusedL2NNTestD_Sq,
::testing::ValuesIn(inputsd));
typedef FusedL2NNTest<double, true> FusedL2NNTestD_Sqrt;
TEST_P(FusedL2NNTestD_Sqrt, Result) {
runTest(min);
ASSERT_TRUE(devArrMatch(min_ref, min, params.m,
CompareApproxAbsKVP<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(FusedL2NNTests, FusedL2NNTestD_Sqrt,
::testing::ValuesIn(inputsd));
/// This is to test output determinism of the prim
template <typename DataT, bool Sqrt>
class FusedL2NNDetTest : public FusedL2NNTest<DataT, Sqrt> {
void SetUp() override {
FusedL2NNTest<DataT, Sqrt>::SetUp();
int m = this->params.m;
allocate(min1, m);
}
void TearDown() override {
FusedL2NNTest<DataT, Sqrt>::TearDown();
CUDA_CHECK(cudaFree(min1));
}
protected:
cub::KeyValuePair<int, DataT> *min1;
static const int NumRepeats = 100;
void generateGoldenResult() override {}
};
typedef FusedL2NNDetTest<float, false> FusedL2NNDetTestF_Sq;
TEST_P(FusedL2NNDetTestF_Sq, Result) {
runTest(min); // assumed to be golden
for (int i = 0; i < NumRepeats; ++i) {
runTest(min1);
ASSERT_TRUE(devArrMatch(min, min1, params.m, CompareExactKVP<float>()));
}
}
INSTANTIATE_TEST_CASE_P(FusedL2NNDetTests, FusedL2NNDetTestF_Sq,
::testing::ValuesIn(inputsf));
typedef FusedL2NNDetTest<float, true> FusedL2NNDetTestF_Sqrt;
TEST_P(FusedL2NNDetTestF_Sqrt, Result) {
runTest(min); // assumed to be golden
for (int i = 0; i < NumRepeats; ++i) {
runTest(min1);
ASSERT_TRUE(devArrMatch(min, min1, params.m, CompareExactKVP<float>()));
}
}
INSTANTIATE_TEST_CASE_P(FusedL2NNDetTests, FusedL2NNDetTestF_Sqrt,
::testing::ValuesIn(inputsf));
typedef FusedL2NNDetTest<double, false> FusedL2NNDetTestD_Sq;
TEST_P(FusedL2NNDetTestD_Sq, Result) {
runTest(min); // assumed to be golden
for (int i = 0; i < NumRepeats; ++i) {
runTest(min1);
ASSERT_TRUE(devArrMatch(min, min1, params.m, CompareExactKVP<double>()));
}
}
INSTANTIATE_TEST_CASE_P(FusedL2NNDetTests, FusedL2NNDetTestD_Sq,
::testing::ValuesIn(inputsd));
typedef FusedL2NNDetTest<double, true> FusedL2NNDetTestD_Sqrt;
TEST_P(FusedL2NNDetTestD_Sqrt, Result) {
runTest(min); // assumed to be golden
for (int i = 0; i < NumRepeats; ++i) {
runTest(min1);
ASSERT_TRUE(devArrMatch(min, min1, params.m, CompareExactKVP<double>()));
}
}
INSTANTIATE_TEST_CASE_P(FusedL2NNDetTests, FusedL2NNDetTestD_Sqrt,
::testing::ValuesIn(inputsd));
} // end namespace Distance
} // end namespace MLCommon
|
3865ad0b6c6155a8f1457936dcdc89d8f04a62d9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/get_value.cuh>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/bit.hpp>
#include <cudf/utilities/error.hpp>
#include <io/utilities/parsing_utils.cuh>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/optional.h>
namespace cudf {
namespace strings {
namespace detail {
namespace {
// debug accessibility
// change to "\n" and 1 to make output more readable
#define DEBUG_NEWLINE
constexpr int DEBUG_NEWLINE_LEN = 0;
/**
* @brief Result of calling a parse function.
*
* The primary use of this is to distinguish between "success" and
* "success but no data" return cases. For example, if you are reading the
* values of an array you might call a parse function in a while loop. You
* would want to continue doing this until you either encounter an error (parse_result::ERROR)
* or you get nothing back (parse_result::EMPTY)
*/
enum class parse_result {
ERROR, // failure
SUCCESS, // success
EMPTY, // success, but no data
};
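// A minimal sketch of the consumption pattern described above (hypothetical
// caller, illustration only -- `j_state` stands in for a json_state defined
// further below):
//
//   parse_result result;
//   while ((result = j_state.next_element()) == parse_result::SUCCESS) {
//     // ... consume the element ...
//   }
//   // result is now EMPTY (clean end of data) or ERROR (malformed input)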
/**
* @brief Base parser class inherited by the (device-side) json_state class and
* (host-side) path_state class.
*
* Contains a number of useful utility functions common to parsing json and
* JSONPath strings.
*/
class parser {
protected:
CUDA_HOST_DEVICE_CALLABLE parser() : input(nullptr), input_len(0), pos(nullptr) {}
CUDA_HOST_DEVICE_CALLABLE parser(const char* _input, int64_t _input_len)
: input(_input), input_len(_input_len), pos(_input)
{
parse_whitespace();
}
CUDA_HOST_DEVICE_CALLABLE parser(parser const& p)
: input(p.input), input_len(p.input_len), pos(p.pos)
{
}
CUDA_HOST_DEVICE_CALLABLE bool eof(const char* p) { return p - input >= input_len; }
CUDA_HOST_DEVICE_CALLABLE bool eof() { return eof(pos); }
CUDA_HOST_DEVICE_CALLABLE bool parse_whitespace()
{
while (!eof()) {
if (is_whitespace(*pos)) {
pos++;
} else {
return true;
}
}
return false;
}
CUDA_HOST_DEVICE_CALLABLE parse_result parse_string(string_view& str,
bool can_be_empty,
char quote)
{
str = string_view(nullptr, 0);
if (parse_whitespace() && *pos == quote) {
const char* start = ++pos;
while (!eof()) {
if (*pos == quote) {
str = string_view(start, pos - start);
pos++;
return parse_result::SUCCESS;
}
pos++;
}
}
return can_be_empty ? parse_result::EMPTY : parse_result::ERROR;
}
// a name means:
// - a string followed by a :
// - no string
CUDA_HOST_DEVICE_CALLABLE parse_result parse_name(string_view& name,
bool can_be_empty,
char quote)
{
if (parse_string(name, can_be_empty, quote) == parse_result::ERROR) {
return parse_result::ERROR;
}
// if we got a real string, the next char must be a :
if (name.size_bytes() > 0) {
if (!parse_whitespace()) { return parse_result::ERROR; }
if (*pos == ':') {
pos++;
return parse_result::SUCCESS;
}
}
return parse_result::EMPTY;
}
// numbers, true, false, null.
// this function is not particularly strong. badly formed values will get
// consumed without throwing any errors
CUDA_HOST_DEVICE_CALLABLE parse_result parse_non_string_value(string_view& val)
{
if (!parse_whitespace()) { return parse_result::ERROR; }
// parse to the end of the value
char const* start = pos;
char const* end = start;
while (!eof(end)) {
char const c = *end;
if (c == ',' || c == '}' || c == ']' || is_whitespace(c)) { break; }
// illegal chars
if (c == '[' || c == '{' || c == ':' || c == '\"') { return parse_result::ERROR; }
end++;
}
pos = end;
val = string_view(start, end - start);
return parse_result::SUCCESS;
}
protected:
char const* input;
int64_t input_len;
char const* pos;
private:
CUDA_HOST_DEVICE_CALLABLE bool is_whitespace(char c) { return c <= ' '; }
};
/**
* @brief Output buffer object. Used during the preprocess/size-computation step
* and the actual output step.
*
* There is an important distinction between two cases:
*
* - producing no output at all. that is, the query matched nothing in the input.
* - producing empty output. the query matched something in the input, but the
* value of the result is an empty string.
*
* Whether `output_len` holds a value is the flag which indicates whether the
* output from the query should be considered null (no value at all) or merely
* empty (a value of zero bytes).
*
*/
struct json_output {
size_t output_max_len;
char* output;
thrust::optional<size_t> output_len;
__device__ void add_output(const char* str, size_t len)
{
if (output != nullptr) { memcpy(output + output_len.value_or(0), str, len); }
output_len = output_len.value_or(0) + len;
}
__device__ void add_output(string_view const& str) { add_output(str.data(), str.size_bytes()); }
};
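// A minimal sketch of the two-pass pattern this struct supports (hypothetical
// driver code; `run_query` is a stand-in for the evaluator defined below):
//
//   json_output sizing{0, nullptr};  // pass 1: output == nullptr, so add_output
//   run_query(input, sizing);        // only accumulates output_len
//   char* buf = /* allocate sizing.output_len.value_or(0) bytes */;
//   json_output out{sizing.output_len.value_or(0), buf};
//   run_query(input, out);           // pass 2: bytes are actually copied
//
// An unset output_len means the query matched nothing (null result); a value of
// zero means it matched but produced an empty string.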
enum json_element_type { NONE, OBJECT, ARRAY, VALUE };
/**
* @brief Parsing class that holds the current state of the json to be parsed and provides
* functions for navigating through it.
*/
class json_state : private parser {
public:
__device__ json_state()
: parser(),
cur_el_start(nullptr),
cur_el_type(json_element_type::NONE),
parent_el_type(json_element_type::NONE)
{
}
__device__ json_state(const char* _input, int64_t _input_len)
: parser(_input, _input_len),
cur_el_start(nullptr),
cur_el_type(json_element_type::NONE),
parent_el_type(json_element_type::NONE)
{
}
__device__ json_state(json_state const& j)
: parser(j),
cur_el_start(j.cur_el_start),
cur_el_type(j.cur_el_type),
parent_el_type(j.parent_el_type)
{
}
// retrieve the entire current element into the output
__device__ parse_result extract_element(json_output* output, bool list_element)
{
char const* start = cur_el_start;
char const* end = start;
// if we're a value type, do a simple value parse.
if (cur_el_type == VALUE) {
pos = cur_el_start;
if (parse_value() != parse_result::SUCCESS) { return parse_result::ERROR; }
end = pos;
// SPARK-specific behavior. if this is a non-list-element wrapped in quotes,
// strip them. we may need to make this behavior configurable in some way
// later on.
if (!list_element && *start == '\"' && *(end - 1) == '\"') {
start++;
end--;
}
}
// otherwise, march through everything inside
else {
int obj_count = 0;
int arr_count = 0;
while (!eof(end)) {
// could do some additional checks here. we know our current
// element type, so we could be more strict on what kinds of
// characters we expect to see.
switch (*end++) {
case '{': obj_count++; break;
case '}': obj_count--; break;
case '[': arr_count++; break;
case ']': arr_count--; break;
default: break;
}
if (obj_count == 0 && arr_count == 0) { break; }
}
if (obj_count > 0 || arr_count > 0) { return parse_result::ERROR; }
pos = end;
}
// parse trailing ,
if (parse_whitespace()) {
if (*pos == ',') { pos++; }
}
if (output != nullptr) { output->add_output({start, static_cast<size_type>(end - start)}); }
return parse_result::SUCCESS;
}
// skip the next element
__device__ parse_result skip_element() { return extract_element(nullptr, false); }
// advance to the next element
__device__ parse_result next_element() { return next_element_internal(false); }
// advance inside the current element
__device__ parse_result child_element(json_element_type expected_type)
{
if (expected_type != NONE && cur_el_type != expected_type) { return parse_result::ERROR; }
// if we succeed, record our parent element type.
auto const prev_el_type = cur_el_type;
auto const result = next_element_internal(true);
if (result == parse_result::SUCCESS) { parent_el_type = prev_el_type; }
return result;
}
// return the next element that matches the specified name.
__device__ parse_result next_matching_element(string_view const& name, bool inclusive)
{
// if we're not including the current element, skip it
if (!inclusive) {
parse_result result = next_element_internal(false);
if (result != parse_result::SUCCESS) { return result; }
}
// loop until we find a match or there's nothing left
do {
// wildcard matches anything
if (name.size_bytes() == 1 && name.data()[0] == '*') {
return parse_result::SUCCESS;
} else if (cur_el_name == name) {
return parse_result::SUCCESS;
}
// next
parse_result result = next_element_internal(false);
if (result != parse_result::SUCCESS) { return result; }
} while (1);
return parse_result::ERROR;
}
private:
// parse a value - either a string or a number/null/bool
__device__ parse_result parse_value()
{
if (!parse_whitespace()) { return parse_result::ERROR; }
// string or number?
string_view unused;
return *pos == '\"' ? parse_string(unused, false, '\"') : parse_non_string_value(unused);
}
__device__ parse_result next_element_internal(bool child)
{
// if we're not getting a child element, skip the current element.
// this will leave pos as the first character -after- the close of
// the current element
if (!child && cur_el_start != nullptr) {
if (skip_element() == parse_result::ERROR) { return parse_result::ERROR; }
cur_el_start = nullptr;
}
// otherwise pos will be at the first character within the current element
// can only get the child of an object or array.
// this could theoretically be handled as an error, but the evaluators I've found
// seem to treat this as "it's nothing"
if (child && (cur_el_type == VALUE || cur_el_type == NONE)) { return parse_result::EMPTY; }
// what's next
if (!parse_whitespace()) { return parse_result::EMPTY; }
// if we're closing off a parent element, we're done
char const c = *pos;
if (c == ']' || c == '}') { return parse_result::EMPTY; }
// if we're not accessing elements of an array, check for name.
bool const array_access =
(cur_el_type == ARRAY && child) || (parent_el_type == ARRAY && !child);
if (!array_access && parse_name(cur_el_name, true, '\"') == parse_result::ERROR) {
return parse_result::ERROR;
}
// element type
if (!parse_whitespace()) { return parse_result::EMPTY; }
switch (*pos++) {
case '[': cur_el_type = ARRAY; break;
case '{': cur_el_type = OBJECT; break;
case ',':
case ':':
case '\'': return parse_result::ERROR;
// value type
default: cur_el_type = VALUE; break;
}
// the start of the current element is always at the value, not the name
cur_el_start = pos - 1;
return parse_result::SUCCESS;
}
const char* cur_el_start; // pointer to the first character of the -value- of the current
// element - not the name
string_view cur_el_name; // name of the current element (if applicable)
json_element_type cur_el_type; // type of the current element
json_element_type parent_el_type; // parent element type
};
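// A minimal sketch of navigating a document with json_state (hypothetical input,
// illustration only; in this file the class is driven from device code):
//
//   json_state j("{\"a\": {\"b\": 5}}", 15);
//   j.next_element();                          // root OBJECT
//   j.child_element(OBJECT);                   // step inside, lands on field "a"
//   j.next_matching_element({"a", 1}, true);   // inclusive match on current element
//   j.child_element(OBJECT);                   // step into the value of "a"
//   j.next_matching_element({"b", 1}, true);   // lands on "b"
//   // extract_element(&out, false) would now emit: 5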
enum class path_operator_type { ROOT, CHILD, CHILD_WILDCARD, CHILD_INDEX, ERROR, END };
/**
* @brief A "command" operator used to query a json string. A full query is
* an array of these operators applied to the incoming json string,
*/
struct path_operator {
CUDA_HOST_DEVICE_CALLABLE path_operator()
: type(path_operator_type::ERROR), index(-1), expected_type{NONE}
{
}
CUDA_HOST_DEVICE_CALLABLE path_operator(path_operator_type _type,
json_element_type _expected_type = NONE)
: type(_type), index(-1), expected_type{_expected_type}
{
}
path_operator_type type; // operator type
// the expected element type we're applying this operation to.
// for example:
// - you cannot retrieve a subscripted field (eg [5]) from an object.
// - you cannot retrieve a field by name (eg .book) from an array.
// - you -can- use .* for both arrays and objects
// a value of NONE implies any type accepted
json_element_type expected_type; // the expected type of the element we're working with
string_view name; // name to match against (if applicable)
int index; // index for subscript operator
};
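// For illustration (hypothetical query, not from this file): the JSONPath
// $.store.book[1] is compiled by path_state/build_command_buffer below into a
// command buffer roughly like
//
//   { ROOT }                                                   // '$'
//   { CHILD,       name = "store", expected_type = OBJECT }    // '.store'
//   { CHILD,       name = "book",  expected_type = OBJECT }    // '.book'
//   { CHILD_INDEX, index = 1,      expected_type = ARRAY  }    // '[1]'
//   { END }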
/**
* @brief Parsing class that holds the current state of the JSONPath string to be parsed
* and provides functions for navigating through it. This is only called on the host
* during the preprocess step which builds a command buffer that the gpu uses.
*/
class path_state : private parser {
public:
path_state(const char* _path, size_t _path_len) : parser(_path, _path_len) {}
// get the next operator in the JSONPath string
path_operator get_next_operator()
{
if (eof()) { return {path_operator_type::END}; }
switch (*pos++) {
case '$': return {path_operator_type::ROOT};
case '.': {
path_operator op;
string_view term{".[", 2};
if (parse_path_name(op.name, term)) {
// this is another potential use case for __SPARK_BEHAVIORS / configurability
// Spark currently only handles the wildcard operator inside [*], it does
// not handle .*
if (op.name.size_bytes() == 1 && op.name.data()[0] == '*') {
op.type = path_operator_type::CHILD_WILDCARD;
op.expected_type = NONE;
} else {
op.type = path_operator_type::CHILD;
op.expected_type = OBJECT;
}
return op;
}
} break;
// 3 ways this can be used
// indices: [0]
// name: ['book']
// wildcard: [*]
case '[': {
path_operator op;
string_view term{"]", 1};
bool const is_string = (*pos == '\'');
if (parse_path_name(op.name, term)) {
pos++;
if (op.name.size_bytes() == 1 && op.name.data()[0] == '*') {
op.type = path_operator_type::CHILD_WILDCARD;
op.expected_type = NONE;
} else {
if (is_string) {
op.type = path_operator_type::CHILD;
op.expected_type = OBJECT;
} else {
op.type = path_operator_type::CHILD_INDEX;
op.index = cudf::io::parse_numeric<int>(
op.name.data(), op.name.data() + op.name.size_bytes(), json_opts, -1);
CUDF_EXPECTS(op.index >= 0, "Invalid numeric index specified in JSONPath");
op.expected_type = ARRAY;
}
}
return op;
}
} break;
// wildcard operator
case '*': {
pos++;
return path_operator{path_operator_type::CHILD_WILDCARD};
} break;
default: CUDF_FAIL("Unrecognized JSONPath operator"); break;
}
return {path_operator_type::ERROR};
}
private:
cudf::io::parse_options_view json_opts{',', '\n', '\"', '.'};
bool parse_path_name(string_view& name, string_view const& terminators)
{
switch (*pos) {
case '*':
name = string_view(pos, 1);
pos++;
break;
case '\'':
if (parse_string(name, false, '\'') != parse_result::SUCCESS) { return false; }
break;
default: {
size_t const chars_left = input_len - (pos - input);
char const* end = std::find_first_of(
pos, pos + chars_left, terminators.data(), terminators.data() + terminators.size_bytes());
if (end) {
name = string_view(pos, end - pos);
pos = end;
} else {
name = string_view(pos, chars_left);
pos = input + input_len;
}
break;
}
}
// an empty name is not valid
CUDF_EXPECTS(name.size_bytes() > 0, "Invalid empty name in JSONPath query string");
return true;
}
};
/**
* @brief Preprocess the incoming JSONPath string on the host to generate a
* command buffer for use by the GPU.
*
* @param json_path The incoming json path
* @param stream Cuda stream to perform any gpu actions on
* @returns A pair containing the command buffer, and maximum stack depth required.
*/
std::pair<thrust::optional<rmm::device_uvector<path_operator>>, int> build_command_buffer(
cudf::string_scalar const& json_path, rmm::cuda_stream_view stream)
{
std::string h_json_path = json_path.to_string(stream);
path_state p_state(h_json_path.data(), static_cast<size_type>(h_json_path.size()));
std::vector<path_operator> h_operators;
path_operator op;
int max_stack_depth = 1;
do {
op = p_state.get_next_operator();
if (op.type == path_operator_type::ERROR) {
CUDF_FAIL("Encountered invalid JSONPath input string");
}
if (op.type == path_operator_type::CHILD_WILDCARD) { max_stack_depth++; }
// convert pointer to device pointer
if (op.name.size_bytes() > 0) {
op.name =
string_view(json_path.data() + (op.name.data() - h_json_path.data()), op.name.size_bytes());
}
if (op.type == path_operator_type::ROOT) {
CUDF_EXPECTS(h_operators.size() == 0, "Root operator ($) can only exist at the root");
}
// if we haven't gotten a root operator to start, and we're not empty, quietly push a
// root operator now.
if (h_operators.size() == 0 && op.type != path_operator_type::ROOT &&
op.type != path_operator_type::END) {
h_operators.push_back(path_operator{path_operator_type::ROOT});
}
h_operators.push_back(op);
} while (op.type != path_operator_type::END);
auto const is_empty = h_operators.size() == 1 && h_operators[0].type == path_operator_type::END;
return is_empty
? std::make_pair(thrust::nullopt, 0)
: std::make_pair(
thrust::make_optional(cudf::detail::make_device_uvector_sync(h_operators, stream)),
max_stack_depth);
}
#define PARSE_TRY(_x) \
do { \
last_result = _x; \
if (last_result == parse_result::ERROR) { return parse_result::ERROR; } \
} while (0)
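// Typical use inside parse_json_path below: wrap each parser call so that hard
// failures unwind immediately while EMPTY falls through to the surrounding
// logic, e.g.
//
//   PARSE_TRY(ctx.j_state.next_element());
//   if (last_result == parse_result::EMPTY) { /* nothing matched; not an error */ }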
/**
* @brief Parse a single json string using the provided command buffer
*
* @param j_state The incoming json string and associated parser
* @param commands The command buffer to be applied to the string. Always ends with a
* path_operator_type::END
* @param output Buffer used to store the results of the query
* @returns A result code indicating success/fail/empty.
*/
template <int max_command_stack_depth>
__device__ parse_result parse_json_path(json_state& j_state,
path_operator const* commands,
json_output& output)
{
// manually maintained context stack in lieu of calling parse_json_path recursively.
struct context {
json_state j_state;
path_operator const* commands;
bool list_element;
bool state_flag;
};
context stack[max_command_stack_depth];
int stack_pos = 0;
auto push_context = [&stack, &stack_pos](json_state const& _j_state,
path_operator const* _commands,
bool _list_element = false,
bool _state_flag = false) {
if (stack_pos == max_command_stack_depth - 1) { return false; }
stack[stack_pos++] = context{_j_state, _commands, _list_element, _state_flag};
return true;
};
auto pop_context = [&stack, &stack_pos](context& c) {
if (stack_pos > 0) {
c = stack[--stack_pos];
return true;
}
return false;
};
push_context(j_state, commands, false);
parse_result last_result = parse_result::SUCCESS;
context ctx;
int element_count = 0;
while (pop_context(ctx)) {
path_operator op = *ctx.commands;
switch (op.type) {
// whatever the first object is
case path_operator_type::ROOT:
PARSE_TRY(ctx.j_state.next_element());
push_context(ctx.j_state, ctx.commands + 1);
break;
// .name
// ['name']
// [1]
// will return a single thing
case path_operator_type::CHILD: {
PARSE_TRY(ctx.j_state.child_element(op.expected_type));
if (last_result == parse_result::SUCCESS) {
PARSE_TRY(ctx.j_state.next_matching_element(op.name, true));
if (last_result == parse_result::SUCCESS) {
push_context(ctx.j_state, ctx.commands + 1, ctx.list_element);
}
}
} break;
// .*
// [*]
// will return an array of things
case path_operator_type::CHILD_WILDCARD: {
// if we're on the first element of this wildcard
if (!ctx.state_flag) {
// we will only ever be returning 1 array
if (!ctx.list_element) { output.add_output({"[" DEBUG_NEWLINE, 1 + DEBUG_NEWLINE_LEN}); }
// step into the child element
PARSE_TRY(ctx.j_state.child_element(op.expected_type));
if (last_result == parse_result::EMPTY) {
if (!ctx.list_element) {
output.add_output({"]" DEBUG_NEWLINE, 1 + DEBUG_NEWLINE_LEN});
}
last_result = parse_result::SUCCESS;
break;
}
// first element
PARSE_TRY(ctx.j_state.next_matching_element({"*", 1}, true));
if (last_result == parse_result::EMPTY) {
if (!ctx.list_element) {
output.add_output({"]" DEBUG_NEWLINE, 1 + DEBUG_NEWLINE_LEN});
}
last_result = parse_result::SUCCESS;
break;
}
// re-push ourselves
push_context(ctx.j_state, ctx.commands, ctx.list_element, true);
// push the next command
push_context(ctx.j_state, ctx.commands + 1, true);
} else {
// next element
PARSE_TRY(ctx.j_state.next_matching_element({"*", 1}, false));
if (last_result == parse_result::EMPTY) {
if (!ctx.list_element) {
output.add_output({"]" DEBUG_NEWLINE, 1 + DEBUG_NEWLINE_LEN});
}
last_result = parse_result::SUCCESS;
break;
}
// re-push ourselves
push_context(ctx.j_state, ctx.commands, ctx.list_element, true);
// push the next command
push_context(ctx.j_state, ctx.commands + 1, true);
}
} break;
// [0]
// [1]
// etc
// returns a single thing
case path_operator_type::CHILD_INDEX: {
PARSE_TRY(ctx.j_state.child_element(op.expected_type));
if (last_result == parse_result::SUCCESS) {
string_view const any{"*", 1};
PARSE_TRY(ctx.j_state.next_matching_element(any, true));
if (last_result == parse_result::SUCCESS) {
int idx;
for (idx = 1; idx <= op.index; idx++) {
PARSE_TRY(ctx.j_state.next_matching_element(any, false));
if (last_result == parse_result::EMPTY) { break; }
}
// if we didn't end up at the index we requested, this is an invalid index
if (idx - 1 != op.index) { return parse_result::ERROR; }
push_context(ctx.j_state, ctx.commands + 1, ctx.list_element);
}
}
} break;
// some sort of error.
case path_operator_type::ERROR: return parse_result::ERROR; break;
// END case
default: {
if (ctx.list_element && element_count > 0) {
output.add_output({"," DEBUG_NEWLINE, 1 + DEBUG_NEWLINE_LEN});
}
PARSE_TRY(ctx.j_state.extract_element(&output, ctx.list_element));
if (ctx.list_element && last_result != parse_result::EMPTY) { element_count++; }
} break;
}
}
return parse_result::SUCCESS;
}
// hardcoding this for now. to reach a stack depth of 8 would require
// a JSONPath containing 7 nested wildcards so this is probably reasonable.
constexpr int max_command_stack_depth = 8;
/**
* @brief Parse a single json string using the provided command buffer
*
* This function exists primarily as a shim for debugging purposes.
*
* @param input The incoming json string
* @param input_len Size of the incoming json string
* @param commands The command buffer to be applied to the string. Always ends with a
* path_operator_type::END
* @param out_buf Buffer used to store the results of the query (nullptr in the size computation
* step)
* @param out_buf_size Size of the output buffer
* @returns A pair containing the result code and the output buffer.
*/
__device__ thrust::pair<parse_result, json_output> get_json_object_single(
char const* input,
size_t input_len,
path_operator const* const commands,
char* out_buf,
size_t out_buf_size)
{
json_state j_state(input, input_len);
json_output output{out_buf_size, out_buf};
auto const result = parse_json_path<max_command_stack_depth>(j_state, commands, output);
return {result, output};
}
/**
* @brief Kernel for running the JSONPath query.
*
* This kernel operates in a 2-pass way. On the first pass, it computes
* output sizes. On the second pass it fills in the provided output buffers
* (chars and validity)
*
* @param col Device view of the incoming string
* @param commands JSONPath command buffer
* @param output_offsets Buffer used to store the string offsets for the results of the query
* @param out_buf Buffer used to store the results of the query
* @param out_validity Output validity buffer
* @param out_valid_count Output count of # of valid bits
*/
template <int block_size>
__launch_bounds__(block_size) __global__
void get_json_object_kernel(column_device_view col,
path_operator const* const commands,
offset_type* output_offsets,
thrust::optional<char*> out_buf,
thrust::optional<bitmask_type*> out_validity,
thrust::optional<size_type*> out_valid_count)
{
size_type tid = threadIdx.x + (blockDim.x * blockIdx.x);
size_type stride = blockDim.x * gridDim.x;
// note: the valid count is zero-initialized by the caller (see d_valid_count in
// get_json_object below); zeroing it here from every thread would race with the
// atomicAdd performed by blocks that have already finished.
size_type warp_valid_count{0};
auto active_threads = __ballot_sync(0xffffffff, tid < col.size());
while (tid < col.size()) {
bool is_valid = false;
string_view const str = col.element<string_view>(tid);
size_type output_size = 0;
if (str.size_bytes() > 0) {
char* dst = out_buf.has_value() ? out_buf.value() + output_offsets[tid] : nullptr;
size_t const dst_size =
out_buf.has_value() ? output_offsets[tid + 1] - output_offsets[tid] : 0;
parse_result result;
json_output out;
thrust::tie(result, out) =
get_json_object_single(str.data(), str.size_bytes(), commands, dst, dst_size);
output_size = out.output_len.value_or(0);
if (out.output_len.has_value() && result == parse_result::SUCCESS) { is_valid = true; }
}
// filled in only during the precompute step. during the compute step, the offsets
// are fed back in so we do -not- want to write them out
if (!out_buf.has_value()) { output_offsets[tid] = static_cast<offset_type>(output_size); }
// validity filled in only during the output step
if (out_validity.has_value()) {
uint32_t mask = __ballot_sync(active_threads, is_valid);
// 0th lane of the warp writes the validity
if (!(tid % cudf::detail::warp_size)) {
out_validity.value()[cudf::word_index(tid)] = mask;
warp_valid_count += __popc(mask);
}
}
tid += stride;
active_threads = __ballot_sync(active_threads, tid < col.size());
}
// sum the valid counts across the whole block
if (out_valid_count) {
size_type block_valid_count =
cudf::detail::single_lane_block_sum_reduce<block_size, 0>(warp_valid_count);
if (threadIdx.x == 0) { atomicAdd(out_valid_count.value(), block_valid_count); }
}
}
/**
* @copydoc cudf::strings::detail::get_json_object
*/
std::unique_ptr<cudf::column> get_json_object(cudf::strings_column_view const& col,
cudf::string_scalar const& json_path,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// preprocess the json_path into a command buffer
auto preprocess = build_command_buffer(json_path, stream);
CUDF_EXPECTS(std::get<1>(preprocess) <= max_command_stack_depth,
"Encountered JSONPath string that is too complex");
// allocate output offsets buffer.
auto offsets = cudf::make_fixed_width_column(
data_type{type_id::INT32}, col.size() + 1, mask_state::UNALLOCATED, stream, mr);
cudf::mutable_column_view offsets_view(*offsets);
// if the query is empty, return a string column containing all nulls
if (!std::get<0>(preprocess).has_value()) {
return std::make_unique<column>(
data_type{type_id::STRING},
col.size(),
rmm::device_buffer{0, stream, mr}, // no data
cudf::detail::create_null_mask(col.size(), mask_state::ALL_NULL, stream, mr),
col.size()); // null count
}
constexpr int block_size = 512;
cudf::detail::grid_1d const grid{col.size(), block_size};
auto cdv = column_device_view::create(col.parent(), stream);
// preprocess sizes (returned in the offsets buffer)
hipLaunchKernelGGL(( get_json_object_kernel<block_size>)
, dim3(grid.num_blocks), dim3(grid.num_threads_per_block), 0, stream.value(),
*cdv,
std::get<0>(preprocess).value().data(),
offsets_view.head<offset_type>(),
thrust::nullopt,
thrust::nullopt,
thrust::nullopt);
// convert sizes to offsets
thrust::exclusive_scan(rmm::exec_policy(stream),
offsets_view.head<offset_type>(),
offsets_view.head<offset_type>() + col.size() + 1,
offsets_view.head<offset_type>(),
0);
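// worked example (illustrative sizes only): per-row sizes [5, 0, 12] scan to
// offsets [0, 5, 5, 17]; the last element, read back below, is the total number
// of output characters.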
size_type const output_size =
cudf::detail::get_value<offset_type>(offsets_view, col.size(), stream);
// allocate output string column
auto chars = cudf::make_fixed_width_column(
data_type{type_id::INT8}, output_size, mask_state::UNALLOCATED, stream, mr);
// potential optimization : if we know that all outputs are valid, we could skip creating
// the validity mask altogether
rmm::device_buffer validity =
cudf::detail::create_null_mask(col.size(), mask_state::UNINITIALIZED, stream, mr);
// compute results
cudf::mutable_column_view chars_view(*chars);
rmm::device_scalar<size_type> d_valid_count{0, stream};
hipLaunchKernelGGL(( get_json_object_kernel<block_size>)
, dim3(grid.num_blocks), dim3(grid.num_threads_per_block), 0, stream.value(),
*cdv,
std::get<0>(preprocess).value().data(),
offsets_view.head<offset_type>(),
chars_view.head<char>(),
static_cast<bitmask_type*>(validity.data()),
d_valid_count.data());
return make_strings_column(col.size(),
std::move(offsets),
std::move(chars),
col.size() - d_valid_count.value(),
std::move(validity),
stream,
mr);
}
} // namespace
} // namespace detail
/**
* @copydoc cudf::strings::get_json_object
*/
std::unique_ptr<cudf::column> get_json_object(cudf::strings_column_view const& col,
cudf::string_scalar const& json_path,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::get_json_object(col, json_path, 0, mr);
}
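// A minimal usage sketch (hypothetical caller code; assumes the default memory
// resource argument declared for this function in the public header):
//
//   auto sv     = cudf::strings_column_view{input_column};
//   auto result = cudf::strings::get_json_object(sv, cudf::string_scalar{"$.a"});
//   // result is a strings column holding the value of field "a" for each row,
//   // with null entries where the path matched nothing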
} // namespace strings
} // namespace cudf
|
3865ad0b6c6155a8f1457936dcdc89d8f04a62d9.cu
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/get_value.cuh>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/bit.hpp>
#include <cudf/utilities/error.hpp>
#include <io/utilities/parsing_utils.cuh>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/optional.h>
namespace cudf {
namespace strings {
namespace detail {
namespace {
// debug accessibility
// change to "\n" and 1 to make output more readable
#define DEBUG_NEWLINE
constexpr int DEBUG_NEWLINE_LEN = 0;
/**
* @brief Result of calling a parse function.
*
* The primary use of this is to distinguish between "success" and
* "success but no data" return cases. For example, if you are reading the
* values of an array you might call a parse function in a while loop. You
* would want to continue doing this until you either encounter an error (parse_result::ERROR)
* or you get nothing back (parse_result::EMPTY)
*/
enum class parse_result {
ERROR, // failure
SUCCESS, // success
EMPTY, // success, but no data
};
/**
* @brief Base parser class inherited by the (device-side) json_state class and
* (host-side) path_state class.
*
* Contains a number of useful utility functions common to parsing json and
* JSONPath strings.
*/
class parser {
protected:
CUDA_HOST_DEVICE_CALLABLE parser() : input(nullptr), input_len(0), pos(nullptr) {}
CUDA_HOST_DEVICE_CALLABLE parser(const char* _input, int64_t _input_len)
: input(_input), input_len(_input_len), pos(_input)
{
parse_whitespace();
}
CUDA_HOST_DEVICE_CALLABLE parser(parser const& p)
: input(p.input), input_len(p.input_len), pos(p.pos)
{
}
CUDA_HOST_DEVICE_CALLABLE bool eof(const char* p) { return p - input >= input_len; }
CUDA_HOST_DEVICE_CALLABLE bool eof() { return eof(pos); }
CUDA_HOST_DEVICE_CALLABLE bool parse_whitespace()
{
while (!eof()) {
if (is_whitespace(*pos)) {
pos++;
} else {
return true;
}
}
return false;
}
CUDA_HOST_DEVICE_CALLABLE parse_result parse_string(string_view& str,
bool can_be_empty,
char quote)
{
str = string_view(nullptr, 0);
if (parse_whitespace() && *pos == quote) {
const char* start = ++pos;
while (!eof()) {
if (*pos == quote) {
str = string_view(start, pos - start);
pos++;
return parse_result::SUCCESS;
}
pos++;
}
}
return can_be_empty ? parse_result::EMPTY : parse_result::ERROR;
}
// a name means:
// - a string followed by a :
// - no string
CUDA_HOST_DEVICE_CALLABLE parse_result parse_name(string_view& name,
bool can_be_empty,
char quote)
{
if (parse_string(name, can_be_empty, quote) == parse_result::ERROR) {
return parse_result::ERROR;
}
// if we got a real string, the next char must be a :
if (name.size_bytes() > 0) {
if (!parse_whitespace()) { return parse_result::ERROR; }
if (*pos == ':') {
pos++;
return parse_result::SUCCESS;
}
}
return parse_result::EMPTY;
}
// numbers, true, false, null.
// this function is not particularly strong. badly formed values will get
// consumed without throwing any errors
CUDA_HOST_DEVICE_CALLABLE parse_result parse_non_string_value(string_view& val)
{
if (!parse_whitespace()) { return parse_result::ERROR; }
// parse to the end of the value
char const* start = pos;
char const* end = start;
while (!eof(end)) {
char const c = *end;
if (c == ',' || c == '}' || c == ']' || is_whitespace(c)) { break; }
// illegal chars
if (c == '[' || c == '{' || c == ':' || c == '\"') { return parse_result::ERROR; }
end++;
}
pos = end;
val = string_view(start, end - start);
return parse_result::SUCCESS;
}
protected:
char const* input;
int64_t input_len;
char const* pos;
private:
CUDA_HOST_DEVICE_CALLABLE bool is_whitespace(char c) { return c <= ' '; }
};
/**
* @brief Output buffer object. Used during the preprocess/size-computation step
* and the actual output step.
*
* There is an important distinction between two cases:
*
* - producing no output at all. that is, the query matched nothing in the input.
* - producing empty output. the query matched something in the input, but the
* value of the result is an empty string.
*
* Whether `output_len` holds a value is the flag which indicates whether the
* output from the query should be considered null (no value at all) or merely
* empty (a value of zero bytes).
*
*/
struct json_output {
size_t output_max_len;
char* output;
thrust::optional<size_t> output_len;
__device__ void add_output(const char* str, size_t len)
{
if (output != nullptr) { memcpy(output + output_len.value_or(0), str, len); }
output_len = output_len.value_or(0) + len;
}
__device__ void add_output(string_view const& str) { add_output(str.data(), str.size_bytes()); }
};
enum json_element_type { NONE, OBJECT, ARRAY, VALUE };
/**
* @brief Parsing class that holds the current state of the json to be parsed and provides
* functions for navigating through it.
*/
class json_state : private parser {
public:
__device__ json_state()
: parser(),
cur_el_start(nullptr),
cur_el_type(json_element_type::NONE),
parent_el_type(json_element_type::NONE)
{
}
__device__ json_state(const char* _input, int64_t _input_len)
: parser(_input, _input_len),
cur_el_start(nullptr),
cur_el_type(json_element_type::NONE),
parent_el_type(json_element_type::NONE)
{
}
__device__ json_state(json_state const& j)
: parser(j),
cur_el_start(j.cur_el_start),
cur_el_type(j.cur_el_type),
parent_el_type(j.parent_el_type)
{
}
// retrieve the entire current element into the output
__device__ parse_result extract_element(json_output* output, bool list_element)
{
char const* start = cur_el_start;
char const* end = start;
// if we're a value type, do a simple value parse.
if (cur_el_type == VALUE) {
pos = cur_el_start;
if (parse_value() != parse_result::SUCCESS) { return parse_result::ERROR; }
end = pos;
// SPARK-specific behavior. if this is a non-list-element wrapped in quotes,
// strip them. we may need to make this behavior configurable in some way
// later on.
if (!list_element && *start == '\"' && *(end - 1) == '\"') {
start++;
end--;
}
}
// otherwise, march through everything inside
else {
int obj_count = 0;
int arr_count = 0;
while (!eof(end)) {
// could do some additional checks here. we know our current
// element type, so we could be more strict on what kinds of
// characters we expect to see.
switch (*end++) {
case '{': obj_count++; break;
case '}': obj_count--; break;
case '[': arr_count++; break;
case ']': arr_count--; break;
default: break;
}
if (obj_count == 0 && arr_count == 0) { break; }
}
if (obj_count > 0 || arr_count > 0) { return parse_result::ERROR; }
pos = end;
}
// parse trailing ,
if (parse_whitespace()) {
if (*pos == ',') { pos++; }
}
if (output != nullptr) { output->add_output({start, static_cast<size_type>(end - start)}); }
return parse_result::SUCCESS;
}
// skip the next element
__device__ parse_result skip_element() { return extract_element(nullptr, false); }
// advance to the next element
__device__ parse_result next_element() { return next_element_internal(false); }
// advance inside the current element
__device__ parse_result child_element(json_element_type expected_type)
{
if (expected_type != NONE && cur_el_type != expected_type) { return parse_result::ERROR; }
// if we succeed, record our parent element type.
auto const prev_el_type = cur_el_type;
auto const result = next_element_internal(true);
if (result == parse_result::SUCCESS) { parent_el_type = prev_el_type; }
return result;
}
// return the next element that matches the specified name.
__device__ parse_result next_matching_element(string_view const& name, bool inclusive)
{
// if we're not including the current element, skip it
if (!inclusive) {
parse_result result = next_element_internal(false);
if (result != parse_result::SUCCESS) { return result; }
}
// loop until we find a match or there's nothing left
do {
// wildcard matches anything
if (name.size_bytes() == 1 && name.data()[0] == '*') {
return parse_result::SUCCESS;
} else if (cur_el_name == name) {
return parse_result::SUCCESS;
}
// next
parse_result result = next_element_internal(false);
if (result != parse_result::SUCCESS) { return result; }
} while (1);
return parse_result::ERROR;
}
private:
// parse a value - either a string or a number/null/bool
__device__ parse_result parse_value()
{
if (!parse_whitespace()) { return parse_result::ERROR; }
// string or number?
string_view unused;
return *pos == '\"' ? parse_string(unused, false, '\"') : parse_non_string_value(unused);
}
__device__ parse_result next_element_internal(bool child)
{
// if we're not getting a child element, skip the current element.
// this will leave pos as the first character -after- the close of
// the current element
if (!child && cur_el_start != nullptr) {
if (skip_element() == parse_result::ERROR) { return parse_result::ERROR; }
cur_el_start = nullptr;
}
// otherwise pos will be at the first character within the current element
// can only get the child of an object or array.
// this could theoretically be handled as an error, but the evaluators I've found
// seem to treat this as "it's nothing"
if (child && (cur_el_type == VALUE || cur_el_type == NONE)) { return parse_result::EMPTY; }
// what's next
if (!parse_whitespace()) { return parse_result::EMPTY; }
// if we're closing off a parent element, we're done
char const c = *pos;
if (c == ']' || c == '}') { return parse_result::EMPTY; }
// if we're not accessing elements of an array, check for name.
bool const array_access =
(cur_el_type == ARRAY && child) || (parent_el_type == ARRAY && !child);
if (!array_access && parse_name(cur_el_name, true, '\"') == parse_result::ERROR) {
return parse_result::ERROR;
}
// element type
if (!parse_whitespace()) { return parse_result::EMPTY; }
switch (*pos++) {
case '[': cur_el_type = ARRAY; break;
case '{': cur_el_type = OBJECT; break;
case ',':
case ':':
case '\'': return parse_result::ERROR;
// value type
default: cur_el_type = VALUE; break;
}
// the start of the current element is always at the value, not the name
cur_el_start = pos - 1;
return parse_result::SUCCESS;
}
const char* cur_el_start; // pointer to the first character of the -value- of the current
// element - not the name
string_view cur_el_name; // name of the current element (if applicable)
json_element_type cur_el_type; // type of the current element
json_element_type parent_el_type; // parent element type
};
enum class path_operator_type { ROOT, CHILD, CHILD_WILDCARD, CHILD_INDEX, ERROR, END };
/**
* @brief A "command" operator used to query a json string. A full query is
* an array of these operators applied to the incoming json string.
*/
struct path_operator {
CUDA_HOST_DEVICE_CALLABLE path_operator()
: type(path_operator_type::ERROR), index(-1), expected_type{NONE}
{
}
CUDA_HOST_DEVICE_CALLABLE path_operator(path_operator_type _type,
json_element_type _expected_type = NONE)
: type(_type), index(-1), expected_type{_expected_type}
{
}
path_operator_type type; // operator type
// the expected element type we're applying this operation to.
// for example:
// - you cannot retrieve a subscripted field (eg [5]) from an object.
// - you cannot retrieve a field by name (eg .book) from an array.
// - you -can- use .* for both arrays and objects
// a value of NONE implies any type accepted
json_element_type expected_type; // the expected type of the element we're working with
string_view name; // name to match against (if applicable)
int index; // index for subscript operator
};
/**
* @brief Parsing class that holds the current state of the JSONPath string to be parsed
* and provides functions for navigating through it. This is only called on the host
* during the preprocess step which builds a command buffer that the gpu uses.
*/
class path_state : private parser {
public:
path_state(const char* _path, size_t _path_len) : parser(_path, _path_len) {}
// get the next operator in the JSONPath string
path_operator get_next_operator()
{
if (eof()) { return {path_operator_type::END}; }
switch (*pos++) {
case '$': return {path_operator_type::ROOT};
case '.': {
path_operator op;
string_view term{".[", 2};
if (parse_path_name(op.name, term)) {
// this is another potential use case for __SPARK_BEHAVIORS / configurability
// Spark currently only handles the wildcard operator inside [*], it does
// not handle .*
if (op.name.size_bytes() == 1 && op.name.data()[0] == '*') {
op.type = path_operator_type::CHILD_WILDCARD;
op.expected_type = NONE;
} else {
op.type = path_operator_type::CHILD;
op.expected_type = OBJECT;
}
return op;
}
} break;
// 3 ways this can be used
// indices: [0]
// name: ['book']
// wildcard: [*]
case '[': {
path_operator op;
string_view term{"]", 1};
bool const is_string = (*pos == '\'');
if (parse_path_name(op.name, term)) {
pos++;
if (op.name.size_bytes() == 1 && op.name.data()[0] == '*') {
op.type = path_operator_type::CHILD_WILDCARD;
op.expected_type = NONE;
} else {
if (is_string) {
op.type = path_operator_type::CHILD;
op.expected_type = OBJECT;
} else {
op.type = path_operator_type::CHILD_INDEX;
op.index = cudf::io::parse_numeric<int>(
op.name.data(), op.name.data() + op.name.size_bytes(), json_opts, -1);
CUDF_EXPECTS(op.index >= 0, "Invalid numeric index specified in JSONPath");
op.expected_type = ARRAY;
}
}
return op;
}
} break;
// wildcard operator
case '*': {
pos++;
return path_operator{path_operator_type::CHILD_WILDCARD};
} break;
default: CUDF_FAIL("Unrecognized JSONPath operator"); break;
}
return {path_operator_type::ERROR};
}
private:
cudf::io::parse_options_view json_opts{',', '\n', '\"', '.'};
bool parse_path_name(string_view& name, string_view const& terminators)
{
switch (*pos) {
case '*':
name = string_view(pos, 1);
pos++;
break;
case '\'':
if (parse_string(name, false, '\'') != parse_result::SUCCESS) { return false; }
break;
default: {
size_t const chars_left = input_len - (pos - input);
char const* end = std::find_first_of(
pos, pos + chars_left, terminators.data(), terminators.data() + terminators.size_bytes());
if (end) {
name = string_view(pos, end - pos);
pos = end;
} else {
name = string_view(pos, chars_left);
pos = input + input_len;
}
break;
}
}
// an empty name is not valid
CUDF_EXPECTS(name.size_bytes() > 0, "Invalid empty name in JSONPath query string");
return true;
}
};
/**
* @brief Preprocess the incoming JSONPath string on the host to generate a
* command buffer for use by the GPU.
*
* @param json_path The incoming json path
 * @param stream CUDA stream on which to perform any GPU actions
* @returns A pair containing the command buffer, and maximum stack depth required.
*/
std::pair<thrust::optional<rmm::device_uvector<path_operator>>, int> build_command_buffer(
cudf::string_scalar const& json_path, rmm::cuda_stream_view stream)
{
std::string h_json_path = json_path.to_string(stream);
path_state p_state(h_json_path.data(), static_cast<size_type>(h_json_path.size()));
std::vector<path_operator> h_operators;
path_operator op;
int max_stack_depth = 1;
do {
op = p_state.get_next_operator();
if (op.type == path_operator_type::ERROR) {
CUDF_FAIL("Encountered invalid JSONPath input string");
}
if (op.type == path_operator_type::CHILD_WILDCARD) { max_stack_depth++; }
// convert pointer to device pointer
if (op.name.size_bytes() > 0) {
op.name =
string_view(json_path.data() + (op.name.data() - h_json_path.data()), op.name.size_bytes());
}
if (op.type == path_operator_type::ROOT) {
CUDF_EXPECTS(h_operators.size() == 0, "Root operator ($) can only exist at the root");
}
    // if we haven't gotten a root operator to start, and we're not empty, quietly push a
// root operator now.
if (h_operators.size() == 0 && op.type != path_operator_type::ROOT &&
op.type != path_operator_type::END) {
h_operators.push_back(path_operator{path_operator_type::ROOT});
}
h_operators.push_back(op);
} while (op.type != path_operator_type::END);
auto const is_empty = h_operators.size() == 1 && h_operators[0].type == path_operator_type::END;
return is_empty
? std::make_pair(thrust::nullopt, 0)
: std::make_pair(
thrust::make_optional(cudf::detail::make_device_uvector_sync(h_operators, stream)),
max_stack_depth);
}
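// Minimal host-side sketch (hypothetical caller, not part of this file's API)
// showing how the command buffer is typically consumed: build it once per
// query, reject queries that are too deep, then hand the device buffer to the
// kernels below.
//
//   cudf::string_scalar json_path{"$.store.book[*].title"};
//   auto preprocess = build_command_buffer(json_path, stream);
//   if (std::get<0>(preprocess).has_value() &&
//       std::get<1>(preprocess) <= max_command_stack_depth) {
//     path_operator const* d_commands = std::get<0>(preprocess).value().data();
//     // launch get_json_object_kernel with d_commands ...
//   }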
#define PARSE_TRY(_x) \
do { \
last_result = _x; \
if (last_result == parse_result::ERROR) { return parse_result::ERROR; } \
} while (0)
/**
* @brief Parse a single json string using the provided command buffer
*
* @param j_state The incoming json string and associated parser
* @param commands The command buffer to be applied to the string. Always ends with a
* path_operator_type::END
 * @param output Buffer used to store the results of the query
* @returns A result code indicating success/fail/empty.
*/
template <int max_command_stack_depth>
__device__ parse_result parse_json_path(json_state& j_state,
path_operator const* commands,
json_output& output)
{
// manually maintained context stack in lieu of calling parse_json_path recursively.
struct context {
json_state j_state;
path_operator const* commands;
bool list_element;
bool state_flag;
};
context stack[max_command_stack_depth];
int stack_pos = 0;
auto push_context = [&stack, &stack_pos](json_state const& _j_state,
path_operator const* _commands,
bool _list_element = false,
bool _state_flag = false) {
if (stack_pos == max_command_stack_depth - 1) { return false; }
stack[stack_pos++] = context{_j_state, _commands, _list_element, _state_flag};
return true;
};
auto pop_context = [&stack, &stack_pos](context& c) {
if (stack_pos > 0) {
c = stack[--stack_pos];
return true;
}
return false;
};
push_context(j_state, commands, false);
parse_result last_result = parse_result::SUCCESS;
context ctx;
int element_count = 0;
while (pop_context(ctx)) {
path_operator op = *ctx.commands;
switch (op.type) {
// whatever the first object is
case path_operator_type::ROOT:
PARSE_TRY(ctx.j_state.next_element());
push_context(ctx.j_state, ctx.commands + 1);
break;
// .name
// ['name']
// [1]
// will return a single thing
case path_operator_type::CHILD: {
PARSE_TRY(ctx.j_state.child_element(op.expected_type));
if (last_result == parse_result::SUCCESS) {
PARSE_TRY(ctx.j_state.next_matching_element(op.name, true));
if (last_result == parse_result::SUCCESS) {
push_context(ctx.j_state, ctx.commands + 1, ctx.list_element);
}
}
} break;
// .*
// [*]
// will return an array of things
case path_operator_type::CHILD_WILDCARD: {
// if we're on the first element of this wildcard
if (!ctx.state_flag) {
// we will only ever be returning 1 array
if (!ctx.list_element) { output.add_output({"[" DEBUG_NEWLINE, 1 + DEBUG_NEWLINE_LEN}); }
// step into the child element
PARSE_TRY(ctx.j_state.child_element(op.expected_type));
if (last_result == parse_result::EMPTY) {
if (!ctx.list_element) {
output.add_output({"]" DEBUG_NEWLINE, 1 + DEBUG_NEWLINE_LEN});
}
last_result = parse_result::SUCCESS;
break;
}
// first element
PARSE_TRY(ctx.j_state.next_matching_element({"*", 1}, true));
if (last_result == parse_result::EMPTY) {
if (!ctx.list_element) {
output.add_output({"]" DEBUG_NEWLINE, 1 + DEBUG_NEWLINE_LEN});
}
last_result = parse_result::SUCCESS;
break;
}
// re-push ourselves
push_context(ctx.j_state, ctx.commands, ctx.list_element, true);
// push the next command
push_context(ctx.j_state, ctx.commands + 1, true);
} else {
// next element
PARSE_TRY(ctx.j_state.next_matching_element({"*", 1}, false));
if (last_result == parse_result::EMPTY) {
if (!ctx.list_element) {
output.add_output({"]" DEBUG_NEWLINE, 1 + DEBUG_NEWLINE_LEN});
}
last_result = parse_result::SUCCESS;
break;
}
// re-push ourselves
push_context(ctx.j_state, ctx.commands, ctx.list_element, true);
// push the next command
push_context(ctx.j_state, ctx.commands + 1, true);
}
} break;
// [0]
// [1]
// etc
// returns a single thing
case path_operator_type::CHILD_INDEX: {
PARSE_TRY(ctx.j_state.child_element(op.expected_type));
if (last_result == parse_result::SUCCESS) {
string_view const any{"*", 1};
PARSE_TRY(ctx.j_state.next_matching_element(any, true));
if (last_result == parse_result::SUCCESS) {
int idx;
for (idx = 1; idx <= op.index; idx++) {
PARSE_TRY(ctx.j_state.next_matching_element(any, false));
if (last_result == parse_result::EMPTY) { break; }
}
// if we didn't end up at the index we requested, this is an invalid index
if (idx - 1 != op.index) { return parse_result::ERROR; }
push_context(ctx.j_state, ctx.commands + 1, ctx.list_element);
}
}
} break;
// some sort of error.
case path_operator_type::ERROR: return parse_result::ERROR; break;
// END case
default: {
if (ctx.list_element && element_count > 0) {
output.add_output({"," DEBUG_NEWLINE, 1 + DEBUG_NEWLINE_LEN});
}
PARSE_TRY(ctx.j_state.extract_element(&output, ctx.list_element));
if (ctx.list_element && last_result != parse_result::EMPTY) { element_count++; }
} break;
}
}
return parse_result::SUCCESS;
}
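// Worked trace of the explicit context stack above for the query "$.*" against
// the input [1, 2] (contexts are popped LIFO):
//
//   pop ROOT     -> step to the array, push(WILDCARD)
//   pop WILDCARD -> emit "[", step to element 1,
//                   push(WILDCARD, state_flag=true), push(END)
//   pop END      -> extract_element writes 1
//   pop WILDCARD -> step to element 2, re-push(WILDCARD, true), push(END)
//   pop END      -> writes "," then 2 (element_count > 0)
//   pop WILDCARD -> next element is EMPTY, emit "]"
//
// Each active wildcard keeps one stack slot alive while it iterates, which is
// why build_command_buffer bumps max_stack_depth once per CHILD_WILDCARD.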
// Hardcoding this for now. To reach a stack depth of 8 would require
// a JSONPath containing 7 nested wildcards, so this is probably reasonable.
constexpr int max_command_stack_depth = 8;
/**
* @brief Parse a single json string using the provided command buffer
*
* This function exists primarily as a shim for debugging purposes.
*
* @param input The incoming json string
* @param input_len Size of the incoming json string
* @param commands The command buffer to be applied to the string. Always ends with a
* path_operator_type::END
 * @param out_buf Buffer used to store the results of the query (nullptr in the size computation
* step)
* @param out_buf_size Size of the output buffer
 * @returns A pair containing the result code and the output buffer.
*/
__device__ thrust::pair<parse_result, json_output> get_json_object_single(
char const* input,
size_t input_len,
path_operator const* const commands,
char* out_buf,
size_t out_buf_size)
{
json_state j_state(input, input_len);
json_output output{out_buf_size, out_buf};
auto const result = parse_json_path<max_command_stack_depth>(j_state, commands, output);
return {result, output};
}
/**
* @brief Kernel for running the JSONPath query.
*
* This kernel operates in a 2-pass way. On the first pass, it computes
* output sizes. On the second pass it fills in the provided output buffers
* (chars and validity)
*
* @param col Device view of the incoming string
* @param commands JSONPath command buffer
* @param output_offsets Buffer used to store the string offsets for the results of the query
* @param out_buf Buffer used to store the results of the query
* @param out_validity Output validity buffer
* @param out_valid_count Output count of # of valid bits
*/
template <int block_size>
__launch_bounds__(block_size) __global__
void get_json_object_kernel(column_device_view col,
path_operator const* const commands,
offset_type* output_offsets,
thrust::optional<char*> out_buf,
thrust::optional<bitmask_type*> out_validity,
thrust::optional<size_type*> out_valid_count)
{
size_type tid = threadIdx.x + (blockDim.x * blockIdx.x);
size_type stride = blockDim.x * gridDim.x;
if (out_valid_count.has_value()) { *(out_valid_count.value()) = 0; }
size_type warp_valid_count{0};
auto active_threads = __ballot_sync(0xffffffff, tid < col.size());
while (tid < col.size()) {
bool is_valid = false;
string_view const str = col.element<string_view>(tid);
size_type output_size = 0;
if (str.size_bytes() > 0) {
char* dst = out_buf.has_value() ? out_buf.value() + output_offsets[tid] : nullptr;
size_t const dst_size =
out_buf.has_value() ? output_offsets[tid + 1] - output_offsets[tid] : 0;
parse_result result;
json_output out;
thrust::tie(result, out) =
get_json_object_single(str.data(), str.size_bytes(), commands, dst, dst_size);
output_size = out.output_len.value_or(0);
if (out.output_len.has_value() && result == parse_result::SUCCESS) { is_valid = true; }
}
// filled in only during the precompute step. during the compute step, the offsets
// are fed back in so we do -not- want to write them out
if (!out_buf.has_value()) { output_offsets[tid] = static_cast<offset_type>(output_size); }
// validity filled in only during the output step
if (out_validity.has_value()) {
uint32_t mask = __ballot_sync(active_threads, is_valid);
// 0th lane of the warp writes the validity
if (!(tid % cudf::detail::warp_size)) {
out_validity.value()[cudf::word_index(tid)] = mask;
warp_valid_count += __popc(mask);
}
}
tid += stride;
active_threads = __ballot_sync(active_threads, tid < col.size());
}
// sum the valid counts across the whole block
if (out_valid_count) {
size_type block_valid_count =
cudf::detail::single_lane_block_sum_reduce<block_size, 0>(warp_valid_count);
if (threadIdx.x == 0) { atomicAdd(out_valid_count.value(), block_valid_count); }
}
}
/**
* @copydoc cudf::strings::detail::get_json_object
*/
std::unique_ptr<cudf::column> get_json_object(cudf::strings_column_view const& col,
cudf::string_scalar const& json_path,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// preprocess the json_path into a command buffer
auto preprocess = build_command_buffer(json_path, stream);
CUDF_EXPECTS(std::get<1>(preprocess) <= max_command_stack_depth,
"Encountered JSONPath string that is too complex");
// allocate output offsets buffer.
auto offsets = cudf::make_fixed_width_column(
data_type{type_id::INT32}, col.size() + 1, mask_state::UNALLOCATED, stream, mr);
cudf::mutable_column_view offsets_view(*offsets);
// if the query is empty, return a string column containing all nulls
if (!std::get<0>(preprocess).has_value()) {
return std::make_unique<column>(
data_type{type_id::STRING},
col.size(),
rmm::device_buffer{0, stream, mr}, // no data
cudf::detail::create_null_mask(col.size(), mask_state::ALL_NULL, stream, mr),
col.size()); // null count
}
constexpr int block_size = 512;
cudf::detail::grid_1d const grid{col.size(), block_size};
auto cdv = column_device_view::create(col.parent(), stream);
// preprocess sizes (returned in the offsets buffer)
get_json_object_kernel<block_size>
<<<grid.num_blocks, grid.num_threads_per_block, 0, stream.value()>>>(
*cdv,
std::get<0>(preprocess).value().data(),
offsets_view.head<offset_type>(),
thrust::nullopt,
thrust::nullopt,
thrust::nullopt);
// convert sizes to offsets
thrust::exclusive_scan(rmm::exec_policy(stream),
offsets_view.head<offset_type>(),
offsets_view.head<offset_type>() + col.size() + 1,
offsets_view.head<offset_type>(),
0);
size_type const output_size =
cudf::detail::get_value<offset_type>(offsets_view, col.size(), stream);
// allocate output string column
auto chars = cudf::make_fixed_width_column(
data_type{type_id::INT8}, output_size, mask_state::UNALLOCATED, stream, mr);
// potential optimization : if we know that all outputs are valid, we could skip creating
// the validity mask altogether
rmm::device_buffer validity =
cudf::detail::create_null_mask(col.size(), mask_state::UNINITIALIZED, stream, mr);
// compute results
cudf::mutable_column_view chars_view(*chars);
rmm::device_scalar<size_type> d_valid_count{0, stream};
get_json_object_kernel<block_size>
<<<grid.num_blocks, grid.num_threads_per_block, 0, stream.value()>>>(
*cdv,
std::get<0>(preprocess).value().data(),
offsets_view.head<offset_type>(),
chars_view.head<char>(),
static_cast<bitmask_type*>(validity.data()),
d_valid_count.data());
return make_strings_column(col.size(),
std::move(offsets),
std::move(chars),
col.size() - d_valid_count.value(),
std::move(validity),
stream,
mr);
}
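// Worked example of the two-pass sizing scheme used above. If three rows
// produce result sizes {5, 0, 12} in the first kernel pass, the exclusive
// scan rewrites the offsets buffer (col.size() + 1 entries) in place:
//
//   sizes   : 5  0 12  ?        (? = unused tail slot)
//   offsets : 0  5  5 17
//
// offsets[col.size()] (17) is read back as the total chars size, and the
// second pass writes row i at out_buf + offsets[i].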
} // namespace
} // namespace detail
/**
* @copydoc cudf::strings::get_json_object
*/
std::unique_ptr<cudf::column> get_json_object(cudf::strings_column_view const& col,
cudf::string_scalar const& json_path,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::get_json_object(col, json_path, 0, mr);
}
} // namespace strings
} // namespace cudf
|
f8674576a4415501be4b4e51e11cc580b9a39cea.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Open sourced multi-head attention
**/
#include <type_traits>
#include <stdint.h>
#include "fastertransformer/open_decoder.h"
#include "hipcub/hipcub.hpp"
#include "fastertransformer/utils/nvtx_utils.h"
#include "masked_multihead_attention.h"
namespace fastertransformer{
const int WARP_SIZE = 32;
const bool ATTENION_OPT = true;
const int ATTENTION_BLOCK_SIZE = 256;
///////////////////////////////////////////////////////////////////////////////////////////////////
template <int HALF_ELEMENTS_PER_WARP_LOAD>
using Copy_half_t =
typename std::conditional<HALF_ELEMENTS_PER_WARP_LOAD == 32, half,
typename std::conditional<HALF_ELEMENTS_PER_WARP_LOAD == 64, int,
typename std::conditional<HALF_ELEMENTS_PER_WARP_LOAD == 128, int2, int4
>::type
>::type
>::type;
template <typename T, int ELEMENTS_PER_WARP_LOAD>
using Copy_t = Copy_half_t<sizeof(T) / sizeof(half) * ELEMENTS_PER_WARP_LOAD>;
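// Example instantiations: a 32-thread warp cooperatively loads
// ELEMENTS_PER_WARP_LOAD elements of T, so the per-thread vector type scales
// with both the element type and the count:
//
//   Copy_t<half,   32> = half   ( 2B/thread: 32 x 1 half )
//   Copy_t<float,  32> = int    ( 4B/thread: 32 x 1 float)
//   Copy_t<float,  64> = int2   ( 8B/thread: 32 x 2 float)
//   Copy_t<float, 128> = int4   (16B/thread: 32 x 4 float)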
///////////////////////////////////////////////////////////////////////////////////////////////////
/**
masked multi-head attention
*/
#define FINAL_MASK 0xffffffff
template <typename T>
__inline__ __device__
T warpReduceSum(T val)
{
for(int mask = 16; mask > 0; mask >>= 1)
val += __shfl_xor_sync(FINAL_MASK, val, mask, 32);
return val;
}
/* Calculate the sum of all elements in a block */
template <typename T>
__inline__ __device__
T blockReduceSum(T val)
{
static __shared__ T shared[32];
// __shared__ T shared[32];
int lane = threadIdx.x & 0x1f;
int wid = threadIdx.x >> 5;
val = warpReduceSum<T>(val);
if(lane == 0)
shared[wid] = val;
__syncthreads();
val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : (T)(0.0f);
val = warpReduceSum<T>(val);
return val;
}
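// How the reduction composes: warpReduceSum is a 5-step shuffle butterfly
// (mask 16, 8, 4, 2, 1), after which every lane holds the 32-lane sum.
// blockReduceSum then has lane 0 of each warp park its partial in shared[],
// and warp 0 re-runs the butterfly over those (at most 32) partials, e.g.
// with blockDim.x = 256 there are 8 partials and lanes 8..31 contribute 0.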
template <typename T>
__inline__ __device__
T warpReduceMax(T val)
{
for(int mask = 16; mask > 0; mask >>= 1)
val = max(val, __shfl_xor_sync(FINAL_MASK, val, mask, 32));
return val;
}
/* Calculate the maximum of all elements in a block */
template <typename T>
__inline__ __device__
T blockReduceMax(T val)
{
static __shared__ T shared[32];
// __shared__ T shared[32];
int lane = threadIdx.x & 0x1f; // in-warp idx
int wid = threadIdx.x >> 5; // warp idx
  val = warpReduceMax(val); // get max in each warp
  if(lane == 0) // record in-warp max by warp idx
shared[wid] = val;
__syncthreads();
val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : (T)-1e20f;
val = warpReduceMax(val);
return val;
}
template <int size_per_head, int block_sz, typename T>
__global__
void masked_attention_kernel_opt(
T* __restrict key_buf, T* __restrict value_buf,
T* __restrict query_buf, const T* __restrict self_Q_bias,
T* __restrict key_cache, const T* __restrict self_K_bias,
T* __restrict value_cache, const T* __restrict self_V_bias,
T* __restrict context_buf, const bool* finished,
int batch_size, int head_num, const int step, const T scalar)
{
if(finished != nullptr && finished[blockIdx.x / head_num] == true) return;
typedef Copy_t<T, size_per_head> copy_t;
const int elems_per_thread = size_per_head / WARP_SIZE;
union Access_t
{
copy_t v;
T x[elems_per_thread]; // supported size 1,2,4
};
typedef struct Float_n_t
{
T x[elems_per_thread]; // supported size 1,2,4
} float_n_t;
__shared__ float_n_t sq[block_sz];
  extern __shared__ float logits[]; // used to store the logits for steps [0, step)
const int tid = threadIdx.x;
const int warp_num = block_sz / WARP_SIZE;
const int bid = blockIdx.x;
const int head_id = blockIdx.x % head_num;
const int warp_id = tid / WARP_SIZE; // warp_id in block
const int lane_id = tid % WARP_SIZE; // lane_id in warp
typedef hipcub::BlockReduce<float, block_sz> MaxValBlockReduce;
typedef hipcub::BlockReduce<float, block_sz> BlockReduce;
__shared__ typename MaxValBlockReduce::TempStorage max_val_block_temp_storage;
__shared__ typename BlockReduce::TempStorage block_temp_storage;
__shared__ typename hipcub::WarpReduce<float>::TempStorage temp_storage[warp_num];
int qkv_id = bid * size_per_head;
int qkv_bias_id = head_id * size_per_head;
query_buf = &query_buf[qkv_id];
key_buf = &key_buf[qkv_id];
value_buf = &value_buf[qkv_id];
self_K_bias = &self_K_bias[qkv_bias_id];
key_cache = &key_cache[qkv_id];
self_Q_bias = &self_Q_bias[qkv_bias_id];
self_V_bias = &self_V_bias[qkv_bias_id];
value_cache = &value_cache[qkv_id];
context_buf = &context_buf[qkv_id];
Access_t bias_r, query_buf_r;
Access_t key_val_r, key_buf_r;
Access_t value_val_r, value_buf_r;
// each warp will have its own copy of sq
query_buf_r.v = *((copy_t *)query_buf + lane_id);
key_buf_r.v = *((copy_t *)key_buf + lane_id);
bias_r.v = *((copy_t *)self_Q_bias + lane_id);
float qb_r[elems_per_thread];
for (int i = 0; i < elems_per_thread; ++i)
{
qb_r[i] = (float)query_buf_r.x[i] + (float)bias_r.x[i];
}
//offset for each step
int offset = batch_size * head_num * size_per_head;
bias_r.v = *((copy_t *) self_K_bias + lane_id);
for(int ite = warp_id; ite < step; ite += warp_num)
{
key_val_r.v = *((copy_t *)&key_cache[ite * offset] + lane_id);
//for the last step, we should update K + bias_K to the cache
if(ite == step - 1)
{
for (int i = 0; i < elems_per_thread; i++)
{
key_val_r.x[i] = (float)key_buf_r.x[i] + (float)bias_r.x[i];
}
*((copy_t *)&key_cache[ite * offset] + lane_id) = key_val_r.v;
}
float val = 0.f;
for (int i = 0; i < elems_per_thread; i++)
{
val = val + (float)key_val_r.x[i] * qb_r[i] * (float)scalar;
}
float qk = hipcub::WarpReduce<float>(temp_storage[warp_id]).Sum(val);
if (lane_id == 0)
{
logits[ite] = qk;
}
}
__syncthreads();
__shared__ float s_max_val, s_sum;
float local_i = -1e20f;
for(int i = tid; i < step; i += blockDim.x)
local_i = max(local_i, logits[i]);
float max_val = MaxValBlockReduce(max_val_block_temp_storage).Reduce(local_i, hipcub::Max());
if(tid == 0)
s_max_val = max_val;
__syncthreads();
float local_o = 0.0f;
for(int i = tid; i < step; i += blockDim.x)
{
logits[i] = __expf(logits[i] - s_max_val);
local_o += logits[i];
}
float val = BlockReduce(block_temp_storage).Sum(local_o);
if(tid == 0)
s_sum = val + 1e-6;
__syncthreads();
float s_sum_inverse = __fdividef(1.0f, s_sum);
for(int i = tid; i < step; i += blockDim.x)
{
logits[i] = logits[i] * s_sum_inverse;
}
__syncthreads();
// This optimization introduces discrepancy because of different order in FP32 summation
float sum_r[elems_per_thread] = {0.f};
bias_r.v = *((copy_t *) self_V_bias + lane_id);
value_buf_r.v = *((copy_t *)value_buf + lane_id);
for(int ite = warp_id; ite < step; ite += warp_num)
{
value_val_r.v = *((copy_t *)&value_cache[ite * offset] + lane_id);
//for the last step, we should update K + bias_K to the cache
if(ite == step - 1)
{
for (int i = 0; i < elems_per_thread; i++)
{
value_val_r.x[i] = (float)value_buf_r.x[i] + (float)bias_r.x[i];
}
*((copy_t *)&value_cache[ite * offset] + lane_id) = value_val_r.v;
}
for (int i = 0; i < elems_per_thread; ++i)
{
sum_r[i] += (float)value_val_r.x[i] * logits[ite];
}
}
for (int i = 0; i < elems_per_thread; i++)
{
sq[warp_id * WARP_SIZE + lane_id].x[i] = sum_r[i];
}
__syncthreads();
if (warp_id == 0)
{
#pragma unroll
for (int j = 1; j < warp_num; j++)
{
for (int i = 0; i < elems_per_thread; ++i)
{
sum_r[i] = sum_r[i] + (float)sq[j * WARP_SIZE + tid].x[i];
}
}
}
__syncthreads();
#pragma unroll
for (int i = 0; i < elems_per_thread; i++)
{
value_val_r.x[i] = sum_r[i];
}
if (warp_id == 0)
{
*((copy_t *)context_buf + lane_id) = value_val_r.v;
}
}
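// The logits pass above is the standard numerically stable softmax: with
// m = max_i logits[i],
//
//   p_i = exp(logits[i] - m) / (sum_j exp(logits[j] - m) + 1e-6)
//
// Subtracting m leaves the result unchanged (the exp(-m) factor cancels
// between numerator and denominator) while preventing overflow, and the 1e-6
// keeps the division finite if every shifted exponential underflows.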
template <typename T>
__global__
void masked_attention_kernel(
T* key_buf, T* value_buf,
T* query_buf, const T* self_Q_bias,
T* key_cache, const T* self_K_bias, T* value_cache, const T* self_V_bias,
T* context_buf, const bool* finished,
int batch_size, int head_num, int size_per_head, const int step, const T scalar)
{
if(finished != nullptr && finished[blockIdx.x / head_num] == true) return;
extern __shared__ __align__(sizeof(T)) unsigned s_buf[];
T* sq = reinterpret_cast<T *>(s_buf);
T* logits = reinterpret_cast<T *>(&sq[size_per_head]);
int tid = threadIdx.x;
int bid = blockIdx.x / head_num;
int head_id = blockIdx.x % head_num;
int qkv_id = bid * head_num * size_per_head + head_id * size_per_head + tid;
int qkv_bias_id = head_id * size_per_head + tid;
if(tid < size_per_head)
sq[tid] = query_buf[qkv_id] + self_Q_bias[qkv_bias_id];
__syncthreads();
//offset for each step
int offset = batch_size * head_num * size_per_head;
for(int ite = 0; ite < step; ++ite)
{
T key = tid < size_per_head ? key_cache[ite * offset + qkv_id] : (T)0.0f;
//for the last step, we should update K + bias_K to the cache
if(ite == step - 1 && tid < size_per_head)
{
key = key_buf[qkv_id] + self_K_bias[qkv_bias_id];
key_cache[ite * offset + qkv_id] = key;
}
T val = (tid < size_per_head) ? key * sq[tid] * scalar : (T)(0.0f);
T qk = blockReduceSum(val);
if(threadIdx.x == 0)
logits[ite] = qk;
__syncthreads(); //try to remove
}
__syncthreads(); //try to remove
__shared__ float s_max_val, s_sum;
float local_i = tid < step ? (float)logits[tid] : -1e20f;
float max_val = blockReduceMax<float>(local_i);
if(tid == 0)
s_max_val = max_val;
__syncthreads();
local_i -= s_max_val;
float local_o = tid < step ? __expf(local_i) : 0.0f;
float val = blockReduceSum<float>(local_o);
if(tid == 0)
s_sum = val + 1e-6;
__syncthreads();
if(tid < step)
logits[tid] = local_o / s_sum;
__syncthreads();
if(tid < size_per_head)
{
T sum = (T)0.0f;
for(int ite = 0; ite < step; ++ite)
{
T value = value_cache[ite * offset + qkv_id];
//for the last step, we should update K + bias_K to the cache
if(ite == step - 1)
{
value = value_buf[qkv_id] + self_V_bias[qkv_bias_id];
value_cache[ite * offset + qkv_id] = value;
}
sum += value * logits[ite];
}
context_buf[qkv_id] = sum;
}
}
template <typename T>
void masked_attention_dispatch(
T* key_buf, T* value_buf,
T* query_buf, const T* self_Q_bias,
T* key_cache, const T* self_K_bias, T* value_cache, const T* self_V_bias,
T* context_buf, const bool* finished, int max_batch_size, int inference_batch_size,
int head_num, int size_per_head, const int step, const int max_seq_len, hipStream_t stream)
{
if (max_seq_len < 0) {
const int block_sz = ATTENTION_BLOCK_SIZE;
T scalar = (T)(1.f / sqrtf(size_per_head * 1.0f));
dim3 grid(inference_batch_size * head_num);
int cond = size_per_head * ((ATTENION_OPT)? 1:0);
switch (cond)
{
case 32:
hipLaunchKernelGGL(( masked_attention_kernel_opt<32, block_sz, T>), dim3(grid), dim3(block_sz), sizeof(float)*step, stream,
key_buf, value_buf,
query_buf, self_Q_bias, key_cache, self_K_bias, value_cache, self_V_bias, context_buf, finished,
max_batch_size, head_num, step, scalar);
break;
case 64:
hipLaunchKernelGGL(( masked_attention_kernel_opt<64, block_sz, T>), dim3(grid), dim3(block_sz), sizeof(float)*step, stream,
key_buf, value_buf,
query_buf, self_Q_bias,
key_cache, self_K_bias,
value_cache, self_V_bias,
context_buf,
finished,
max_batch_size, head_num, step, scalar);
break;
case 128:
hipLaunchKernelGGL(( masked_attention_kernel_opt<128, block_sz, T>), dim3(grid), dim3(block_sz), sizeof(float)*step, stream,
key_buf, value_buf,
query_buf, self_Q_bias, key_cache, self_K_bias, value_cache, self_V_bias, context_buf, finished,
max_batch_size, head_num, step, scalar);
break;
default:
// default path
int block_size = 128;
//suppose size_per_head <= 128
if(step <= 64)
block_size = 64;
else if(step <= 128 && step > size_per_head)
block_size = 128;
else if(step > 128 && step <= 256)
block_size = 256;
else if(step > 256 && step <= 512)
block_size = 512;
else
block_size = 1024;
if((int)block_size < size_per_head)
block_size = size_per_head;
assert(block_size <= 1024);
dim3 block(block_size);
T scalar = 1 / sqrtf(size_per_head * 1.0f);
int shared_size = sizeof(T) * (size_per_head + step);
hipLaunchKernelGGL(( masked_attention_kernel<T>), dim3(grid), dim3(block), shared_size, stream,
key_buf, value_buf,
query_buf, self_Q_bias,
key_cache, self_K_bias,
value_cache, self_V_bias,
context_buf, finished, max_batch_size,
head_num, size_per_head, step, scalar);
}
}
else {
assert(step > 0);
assert(size_per_head == 32 || size_per_head == 64 || size_per_head == 128);
using DataType = typename std::conditional<sizeof(T) == 4, float, uint16_t>::type;
// Prepare the parameters.
Masked_multihead_attention_params<DataType> params;
memset(¶ms, 0, sizeof(params));
params.q_bias = reinterpret_cast<const DataType *>(self_Q_bias);
params.k_bias = reinterpret_cast<const DataType *>(self_K_bias);
params.v_bias = reinterpret_cast<const DataType *>(self_V_bias);
// Set the output buffer.
params.out = reinterpret_cast<DataType *>(context_buf);
// Set the input buffers.
params.q = reinterpret_cast<const DataType *>(query_buf);
params.k = reinterpret_cast<const DataType *>(key_buf);
params.v = reinterpret_cast<const DataType *>(value_buf);
params.stride = 0;
params.finished = const_cast<bool*>(finished);
params.k_cache = reinterpret_cast<DataType *>(key_cache);
params.v_cache = reinterpret_cast<DataType *>(value_cache);
params.batch_size = inference_batch_size;
params.seq_length = max_seq_len;
params.timestep = step-1;
params.num_heads = head_num;
params.hidden_size_per_head = size_per_head;
params.inv_sqrt_dh = 1.F / sqrtf((float) params.hidden_size_per_head);
masked_multihead_attention(params, stream);
}
}
template void masked_attention_dispatch(
float* key_buf,
float* value_buf,
float* query_buf,
const float* self_Q_bias,
float* key_cache,
const float* self_K_bias,
float* value_cache,
const float* self_V_bias,
float* context_buf,
const bool* finished,
int max_batch_size,
int inference_batch_size,
int head_num,
int size_per_head,
const int step,
const int max_seq_size,
hipStream_t stream);
template void masked_attention_dispatch(
half* key_buf,
half* value_buf,
half* query_buf,
const half* self_Q_bias,
half* key_cache,
const half* self_K_bias,
half* value_cache,
const half* self_V_bias,
half* context_buf,
const bool* finished,
int max_batch_size,
int inference_batch_size,
int head_num,
int size_per_head,
const int step,
const int max_seq_size,
hipStream_t stream);
template <int size_per_head, int block_sz, typename T>
__global__
void fusedQKV_masked_attention_kernel_opt(
const T* __restrict qkv_buf, const T* __restrict qkv_bias,
T* __restrict key_cache,
T* __restrict value_cache,
T* __restrict context_buf, const bool* finished, int batch_size, int head_num, const int step, const T scalar)
{
if(finished != nullptr && finished[blockIdx.x / head_num] == true) return;
typedef Copy_t<T, size_per_head> copy_t;
const int elems_per_thread = size_per_head / WARP_SIZE;
union Access_t
{
copy_t v;
T x[elems_per_thread]; // supported size 1,2,4
};
typedef struct Float_n_t
{
T x[elems_per_thread]; // supported size 1,2,4
} float_n_t;
__shared__ float_n_t sq[block_sz];
  extern __shared__ float logits[]; // used to store the logits for steps [0, step)
const int tid = threadIdx.x;
const int warp_num = block_sz / WARP_SIZE;
const int bid = blockIdx.x;
const int head_id = blockIdx.x % head_num;
const int warp_id = tid / WARP_SIZE; // warp_id in block
const int lane_id = tid % WARP_SIZE; // lane_id in warp
const int batch_id = bid / head_num;
const int hidden_units = head_num * size_per_head;
typedef hipcub::BlockReduce<float, block_sz> MaxValBlockReduce;
typedef hipcub::BlockReduce<float, block_sz> BlockReduce;
__shared__ typename MaxValBlockReduce::TempStorage max_val_block_temp_storage;
__shared__ typename BlockReduce::TempStorage block_temp_storage;
__shared__ typename hipcub::WarpReduce<float>::TempStorage temp_storage[warp_num];
int qkv_id = batch_id * 3 * hidden_units + head_id * size_per_head;
int qkv_bias_id = head_id * size_per_head;
int cache_qkv_id = bid * size_per_head;
const T* query_buf = qkv_buf + qkv_id;
const T* key_buf = qkv_buf + hidden_units + qkv_id;
const T* value_buf = qkv_buf + 2 * hidden_units + qkv_id;
const T* self_Q_bias = qkv_bias + qkv_bias_id;
const T* self_K_bias = qkv_bias + hidden_units + qkv_bias_id;
const T* self_V_bias = qkv_bias + 2 * hidden_units + qkv_bias_id;
value_cache = value_cache + cache_qkv_id;
key_cache = key_cache + cache_qkv_id;
context_buf = context_buf + cache_qkv_id;
Access_t bias_r, query_buf_r;
Access_t key_val_r, key_buf_r;
Access_t value_val_r, value_buf_r;
// each warp will have its own copy of sq
query_buf_r.v = *((copy_t *)query_buf + lane_id);
key_buf_r.v = *((copy_t *)key_buf + lane_id);
bias_r.v = *((copy_t *)self_Q_bias + lane_id);
float qb_r[elems_per_thread];
for (int i = 0; i < elems_per_thread; ++i)
{
qb_r[i] = (float)query_buf_r.x[i] + (float)bias_r.x[i];
}
//offset for each step
int offset = batch_size * hidden_units;
bias_r.v = *((copy_t *) self_K_bias + lane_id);
for(int ite = warp_id; ite < step; ite += warp_num)
{
key_val_r.v = *((copy_t *)&key_cache[ite * offset] + lane_id);
//for the last step, we should update K + bias_K to the cache
if(ite == step - 1)
{
for (int i = 0; i < elems_per_thread; i++)
{
key_val_r.x[i] = (float)key_buf_r.x[i] + (float)bias_r.x[i];
}
*((copy_t *)&key_cache[ite * offset] + lane_id) = key_val_r.v;
}
float val = 0.f;
for (int i = 0; i < elems_per_thread; i++)
{
val = val + (float)key_val_r.x[i] * qb_r[i] * (float)scalar;
}
float qk = hipcub::WarpReduce<float>(temp_storage[warp_id]).Sum(val);
if (lane_id == 0)
{
logits[ite] = qk;
}
}
__syncthreads();
__shared__ float s_max_val, s_sum;
float local_i = -1e20f;
for(int i = tid; i < step; i += blockDim.x)
local_i = max(local_i, logits[i]);
float max_val = MaxValBlockReduce(max_val_block_temp_storage).Reduce(local_i, hipcub::Max());
if(tid == 0)
s_max_val = max_val;
__syncthreads();
float local_o = 0.0f;
for(int i = tid; i < step; i += blockDim.x)
{
logits[i] = __expf(logits[i] - s_max_val);
local_o += logits[i];
}
float val = BlockReduce(block_temp_storage).Sum(local_o);
if(tid == 0)
s_sum = val + 1e-6;
__syncthreads();
float s_sum_inverse = __fdividef(1.0f, s_sum);
for(int i = tid; i < step; i += blockDim.x)
{
logits[i] = logits[i] * s_sum_inverse;
}
__syncthreads();
// This optimization introduces discrepancy because of different order in FP32 summation
float sum_r[elems_per_thread] = {0.f};
bias_r.v = *((copy_t *) self_V_bias + lane_id);
value_buf_r.v = *((copy_t *)value_buf + lane_id);
for(int ite = warp_id; ite < step; ite += warp_num)
{
value_val_r.v = *((copy_t *)&value_cache[ite * offset] + lane_id);
//for the last step, we should update K + bias_K to the cache
if(ite == step - 1)
{
for (int i = 0; i < elems_per_thread; i++)
{
value_val_r.x[i] = (float)value_buf_r.x[i] + (float)bias_r.x[i];
}
*((copy_t *)&value_cache[ite * offset] + lane_id) = value_val_r.v;
}
for (int i = 0; i < elems_per_thread; ++i)
{
sum_r[i] += (float)value_val_r.x[i] * logits[ite];
}
}
for (int i = 0; i < elems_per_thread; i++)
{
sq[warp_id * WARP_SIZE + lane_id].x[i] = sum_r[i];
}
__syncthreads();
if (warp_id == 0)
{
#pragma unroll
for (int j = 1; j < warp_num; j++)
{
for (int i = 0; i < elems_per_thread; ++i)
{
sum_r[i] = sum_r[i] + (float)sq[j * WARP_SIZE + tid].x[i];
}
}
}
__syncthreads();
#pragma unroll
for (int i = 0; i < elems_per_thread; i++)
{
value_val_r.x[i] = sum_r[i];
}
if (warp_id == 0)
{
*((copy_t *)context_buf + lane_id) = value_val_r.v;
}
}
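// Layout assumed by the pointer setup at the top of this kernel: per decode
// step the fused buffer is qkv_buf[batch][3][head_num][size_per_head], so
// with hidden = head_num * size_per_head, batch b and head h:
//
//   q = qkv_buf + b * 3 * hidden              + h * size_per_head
//   k = qkv_buf + b * 3 * hidden +     hidden + h * size_per_head
//   v = qkv_buf + b * 3 * hidden + 2 * hidden + h * size_per_head
//
// qkv_bias is packed the same way minus the batch dimension.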
template <typename T>
void fusedQKV_masked_attention_dispatch(
const T* qkv_buf, const T* qkv_bias,
T* key_cache, T* value_cache,
T* context_buf, const bool* finished, int max_batch_size, int inference_batch_size,
int head_num, int size_per_head, const int step, const int max_seq_len, hipStream_t stream)
{
if (max_seq_len < 0) {
const int block_sz = ATTENTION_BLOCK_SIZE;
T scalar = (T)(1.f / sqrtf(size_per_head * 1.0f));
dim3 grid(inference_batch_size * head_num);
int cond = size_per_head * ((ATTENION_OPT)? 1:0);
switch (cond)
{
case 32:
hipLaunchKernelGGL(( fusedQKV_masked_attention_kernel_opt<32, block_sz, T>), dim3(grid), dim3(block_sz), sizeof(float)*step, stream,
qkv_buf, qkv_bias,
key_cache, value_cache,
context_buf,
finished,
max_batch_size, head_num, step, scalar);
break;
case 64:
hipLaunchKernelGGL(( fusedQKV_masked_attention_kernel_opt<64, block_sz, T>), dim3(grid), dim3(block_sz), sizeof(float)*step, stream,
qkv_buf, qkv_bias,
key_cache,
value_cache,
context_buf,
finished,
max_batch_size, head_num, step, scalar);
break;
case 128:
hipLaunchKernelGGL(( fusedQKV_masked_attention_kernel_opt<128, block_sz, T>), dim3(grid), dim3(block_sz), sizeof(float)*step, stream,
qkv_buf, qkv_bias,
key_cache,
value_cache,
context_buf,
finished,
max_batch_size, head_num, step, scalar);
break;
default:
assert(false);
}
}
else {
using DataType = typename std::conditional<sizeof(T) == 4, float, uint16_t>::type;
// Prepare the parameters.
Masked_multihead_attention_params<DataType> params;
memset(¶ms, 0, sizeof(params));
int hidden_units = head_num * size_per_head;
params.q_bias = reinterpret_cast<const DataType *>(qkv_bias);
params.k_bias = reinterpret_cast<const DataType *>(qkv_bias) + hidden_units;
params.v_bias = reinterpret_cast<const DataType *>(qkv_bias) + 2 * hidden_units;
// Set the output buffer.
params.out = reinterpret_cast<DataType *>(context_buf);
// Set the input buffers.
params.q = reinterpret_cast<const DataType *>(qkv_buf);
params.k = reinterpret_cast<const DataType *>(qkv_buf) + hidden_units;
params.v = reinterpret_cast<const DataType *>(qkv_buf) + 2 * hidden_units;
params.stride = 3 * hidden_units;
params.finished = const_cast<bool*>(finished);
params.k_cache = reinterpret_cast<DataType *>(key_cache);
params.v_cache = reinterpret_cast<DataType *>(value_cache);
params.batch_size = inference_batch_size;
params.seq_length = max_seq_len;
params.timestep = step-1;
params.num_heads = head_num;
params.hidden_size_per_head = size_per_head;
params.inv_sqrt_dh = 1.F / sqrtf((float) params.hidden_size_per_head);
masked_multihead_attention(params, stream);
}
}
template void fusedQKV_masked_attention_dispatch(
const float* qkv_buf,
const float* qkv_bias,
float* key_cache,
float* value_cache,
float* context_buf,
const bool* finished,
int max_batch_size,
int inference_batch_size,
int head_num,
int size_per_head,
const int step,
const int max_seq_len,
hipStream_t stream);
template void fusedQKV_masked_attention_dispatch(
const half* qkv_buf,
const half* qkv_bias,
half* key_cache,
half* value_cache,
half* context_buf,
const bool* finished,
int max_batch_size,
int inference_batch_size,
int head_num,
int size_per_head,
const int step,
const int max_seq_len,
hipStream_t stream);
template <typename T>
void fusedQKV_masked_attention_kernelLauncher(
const T* qkv_buf,
const T* qkv_bias,
T* k_cache,
T* v_cache,
T* output,
const int batch_size,
const int seq_len,
const int head_num,
const int size_per_head,
const int max_seq_len,
hipStream_t stream)
{
fusedQKV_masked_attention_dispatch(qkv_buf,
qkv_bias,
k_cache,
v_cache,
output,
nullptr,
batch_size,
batch_size,
head_num,
size_per_head,
seq_len,
max_seq_len,
stream);
}
template<typename T>
__global__ void transpose_4d(T* dst, T* src,
const int dim0,
const int dim1,
const int dim2,
const int dim3,
const int dim0_leading_dim,
const int ite)
{
// transpose from [dim0, dim1, dim2, dim3] to [dim2, X, dim1, dim3]
// where the dimension of X is dim0_leading_dim, and offset is ite * dim0
for(int i = threadIdx.x + blockIdx.x * blockDim.x; i < dim0 * dim1 * dim2 * dim3; i+= blockDim.x * gridDim.x)
{
int index = i;
const int d3 = index % dim3;
index = (index - d3) / dim3;
const int d2 = index % dim2;
index = (index - d2) / dim2;
const int d1 = index % dim1;
index = (index - d1) / dim1;
const int d0 = index % dim0;
index = (index - d0) / dim0;
dst[d2 * dim0_leading_dim * dim1 * dim3 + (d0 + dim0 * ite) * dim1 * dim3 + d1 * dim3 + d3] = src[i];
}
}
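// Index decomposition used above: the flat index i over [dim0, dim1, dim2,
// dim3] is peeled innermost-first. For dims (2, 3, 4, 8) and i = 100:
//   d3 = 100 % 8 = 4;  i -> 12
//   d2 =  12 % 4 = 0;  i ->  3
//   d1 =   3 % 3 = 0;  i ->  1
//   d0 =   1 % 2 = 1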
template<>
__global__ void transpose_4d(half* dst, half* src,
const int dim0,
const int dim1,
const int dim2,
const int dim3,
const int dim0_leading_dim,
const int ite)
{
half2 *dst_ptr = (half2 *) dst;
half2 *src_ptr = (half2 *) src;
const int half_dim3 = dim3 / 2;
  // transpose from [dim0, dim1, dim2, half_dim3] to [dim2, X, dim1, half_dim3]
  // where the dimension of X is dim0_leading_dim, and the offset is ite * dim0
for(int i = threadIdx.x + blockIdx.x * blockDim.x; i < dim0 * dim1 * dim2 * half_dim3; i+= blockDim.x * gridDim.x)
{
int index = i;
const int d3 = index % half_dim3;
index = (index - d3) / half_dim3;
const int d2 = index % dim2;
index = (index - d2) / dim2;
const int d1 = index % dim1;
index = (index - d1) / dim1;
const int d0 = index % dim0;
index = (index - d0) / dim0;
dst_ptr[d2 * dim0_leading_dim * dim1 * half_dim3 + (d0 + dim0 * ite) * dim1 * half_dim3 + d1 * half_dim3 + d3] = src_ptr[i];
}
}
template<typename T>
void transpose_4d_kernelLauncher(T* dst, T* src,
const int local_batch_size,
const int seq_len,
const int size_per_head,
const int local_hidden_units,
const int local_head_num,
const int batch_size,
const int ite,
hipStream_t stream)
{
hipLaunchKernelGGL(( transpose_4d), dim3(local_batch_size * seq_len * local_hidden_units / 512), dim3(512 / (4 / (sizeof(T)))), 0, stream,
dst, src,
local_batch_size, local_head_num,
seq_len, size_per_head, batch_size, ite);
}
template void transpose_4d_kernelLauncher(
float* dst,
float* src,
const int local_batch_size,
const int seq_len,
const int size_per_head,
const int local_hidden_units,
const int local_head_num,
const int batch_size,
const int ite,
hipStream_t stream);
template void transpose_4d_kernelLauncher(
half* dst,
half* src,
const int local_batch_size,
const int seq_len,
const int size_per_head,
const int local_hidden_units,
const int local_head_num,
const int batch_size,
const int ite,
hipStream_t stream);
#define NEW_TRANSPOSE_BATCH_MAJOR 1
template<typename T>
__global__ void transpose_4d_batch_major_k_cache(T* k_dst, const T* k_src,
const int head_num,
const int size_per_head,
const int seq_len,
const int max_seq_len)
{
const int batch_id = blockIdx.y;
const int head_id = blockIdx.z;
constexpr int X_ELEMS = (sizeof(T) == 4)? 4 : 8;
auto key_src = reinterpret_cast<const uint4*>(k_src + batch_id * head_num * size_per_head * seq_len + head_id * size_per_head * seq_len);
auto key_dst = reinterpret_cast<uint4*>(k_dst + batch_id * head_num * size_per_head * max_seq_len + head_id * size_per_head * max_seq_len);
const int out_idx = blockIdx.x * blockDim.x + threadIdx.x;
int size_per_head_div_x = size_per_head / X_ELEMS;
if (out_idx >= head_num * size_per_head_div_x * max_seq_len) return;
int idx = out_idx;
const int k_seq_len_id = idx % max_seq_len;
idx = (idx - k_seq_len_id) / max_seq_len;
const int k_head_size_id = idx % size_per_head_div_x;
if (k_seq_len_id < seq_len)
key_dst[out_idx] = key_src[k_seq_len_id * size_per_head_div_x + k_head_size_id];
}
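// Sketch of the K-cache transform above, assuming T = float so x = 4 elements
// travel per uint4: the source is laid out [B, H, L, size_per_head] and the
// destination as [B, H, size_per_head/x, max_seq_len, x]. At decode time one
// head's dot products then read the cache with unit stride along the
// max_seq_len axis instead of striding by size_per_head.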
template<typename T>
__global__ void transpose_4d_batch_major_v_cache(T* v_dst, const T* v_src,
const int head_num,
const int size_per_head,
const int seq_len,
const int max_seq_len)
{
const int batch_id = blockIdx.y;
const int head_id = blockIdx.z;
// 16 byte loads will handle "x" dimension
auto val_src = reinterpret_cast<const uint4*>(v_src + batch_id * head_num * size_per_head * seq_len + head_id * size_per_head * seq_len);
auto val_dst = reinterpret_cast<uint4*>(v_dst + batch_id * head_num * size_per_head * max_seq_len + head_id * size_per_head * max_seq_len);
// idx is over output dimension L * size_per_head / x for values
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
constexpr int X_ELEMS = (sizeof(T) == 4)? 4 : 8;
const int size_per_head_div_x = size_per_head / X_ELEMS;
if (idx >= size_per_head_div_x * seq_len) return;
val_dst[idx] = val_src[idx];
}
template<typename T>
__global__ void transpose_4d_batch_major(T* k_dst, T* v_dst,
const T* k_src, const T* v_src,
const int head_num,
const int size_per_head,
const int seq_len,
const int max_seq_len)
{
  const int hidden_dim = head_num * size_per_head;
const int x = (sizeof(T) == 4)? 4 : 8;
const int size_per_head_split = size_per_head / x;
const int batch_id = blockIdx.x;
const int seq_id = blockIdx.y;
for(int id = threadIdx.x; id < head_num * size_per_head_split * x; id += blockDim.x)
{
int tmp_id = id;
int x_id = tmp_id % x;
tmp_id = (tmp_id - x_id) / x;
int size_id = tmp_id % size_per_head_split;
tmp_id = (tmp_id - size_id) / size_per_head_split;
int head_id = tmp_id % head_num;
// key: [B, head_num, L, size_per_head / x, x] -> [B, head_num, size_per_head / x, L, x]
k_dst[batch_id * hidden_dim * max_seq_len + head_id * size_per_head * max_seq_len + size_id * max_seq_len * x + seq_id * x + x_id] =
k_src[batch_id * hidden_dim * seq_len + head_id * size_per_head * seq_len + seq_id * size_per_head + size_id * x + x_id];
// value: [B, head_num, L, size_per_head / x, x] -> [B, head_num, L, size_per_head/x, x]
v_dst[batch_id * hidden_dim * max_seq_len + head_id * size_per_head * max_seq_len + seq_id * size_per_head + size_id * x + x_id] =
v_src[batch_id * hidden_dim * seq_len + head_id * size_per_head * seq_len + seq_id * size_per_head + size_id * x + x_id];
}
}
template<typename T>
void transpose_4d_batch_major_kernelLauncher(T* k_dst, T* v_dst,
const T* k_src, const T* v_src,
const int local_batch_size,
const int seq_len,
const int max_seq_len,
const int size_per_head,
const int local_head_num,
hipStream_t stream)
{
constexpr int block_sz = 128;
#if NEW_TRANSPOSE_BATCH_MAJOR == 1
constexpr int x = (sizeof(T) == 4)? 4 : 8;
int size = max_seq_len * size_per_head / x;
dim3 grid((size + block_sz - 1) / block_sz, local_batch_size, local_head_num);
dim3 grid_v((seq_len * size_per_head / x + block_sz - 1) / block_sz, local_batch_size, local_head_num);
hipLaunchKernelGGL(( transpose_4d_batch_major_k_cache), dim3(grid), dim3(block_sz), 0, stream,
k_dst, k_src,
local_head_num,
size_per_head,
seq_len,
max_seq_len
);
hipLaunchKernelGGL(( transpose_4d_batch_major_v_cache), dim3(grid_v), dim3(block_sz), 0, stream,
v_dst, v_src,
local_head_num,
size_per_head,
seq_len,
max_seq_len
);
#else
dim3 grid(local_batch_size, seq_len);
hipLaunchKernelGGL(( transpose_4d_batch_major), dim3(grid), dim3(block_sz), 0, stream,
k_dst, v_dst,
k_src, v_src,
local_head_num,
size_per_head,
seq_len,
max_seq_len
);
#endif
}
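// Example launch arithmetic, assuming float caches (x = 4), size_per_head = 64
// and max_seq_len = 1024: size = 1024 * 64 / 4 = 16384 uint4 copies per
// (batch, head) pair, so grid.x = (16384 + 127) / 128 = 128 blocks, with
// grid.y = local_batch_size and grid.z = local_head_num.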
template void transpose_4d_batch_major_kernelLauncher(float* k_dst, float* v_dst,
const float* k_src, const float* v_src,
const int local_batch_size,
const int seq_len,
const int max_seq_len,
const int size_per_head,
const int local_head_num,
hipStream_t stream);
template void transpose_4d_batch_major_kernelLauncher(half* k_dst, half* v_dst,
const half* k_src, const half* v_src,
const int local_batch_size,
const int seq_len,
const int max_seq_len,
const int size_per_head,
const int local_head_num,
hipStream_t stream);
template<typename T>
__global__
void add_QKV_bias_generalized_2(const T* __restrict QKV,
const T* __restrict bias,
T* q_buf_, T* k_buf_, T* v_buf_,
const int batch_size, const int seq_len,
const int head_num, const int size_per_head,
const int word_per_block)
{
// QKV: [batch x sequence length, hidden * 3]
  const int n = head_num * size_per_head;
  // q_buf_/k_buf_/v_buf_ are selected by which third of the fused row the
  // flat index falls into; bias is the fused QKV bias of length 3 * n.
  T* buf_ptrs[3] = {q_buf_, k_buf_, v_buf_};
  const int bid = blockIdx.x;
  for(int index = threadIdx.x; index < 3 * n; index += blockDim.x)
  {
    buf_ptrs[index / n][bid * n + index % n] = QKV[bid * 3 * n + index] + __ldg(&bias[index]);
  }
}
template <typename T, int size_per_head, int block_sz>
__global__
void cross_attention_kernel_opt(
T* __restrict query_buf, const T* __restrict Q_bias,
T* __restrict key_cache, const T* __restrict K_bias,
T* __restrict value_cache, const T* __restrict V_bias,
const int* length_per_sample, T* __restrict context_buf,
const bool* finished,
int batch_size, int head_num, const int step, const int seq_len, const float scalar)
{
if(finished != nullptr && finished[blockIdx.x / head_num] == true) return;
typedef Copy_t<T, size_per_head> copy_t;
const int elems_per_thread = size_per_head / WARP_SIZE;
union Access_t
{
copy_t v;
T x[elems_per_thread]; // supported size 1,2,4
};
typedef struct Float_n_t
{
float x[elems_per_thread]; // supported size 1,2,4
} float_n_t;
__shared__ float_n_t sq[block_sz];
  extern __shared__ float logits[]; // used to store the logits for steps [0, step)
const int warp_id = threadIdx.x / WARP_SIZE;
const int warp_num = block_sz / WARP_SIZE;
typedef hipcub::BlockReduce<float, block_sz> MaxValBlockReduce;
typedef hipcub::BlockReduce<float, block_sz> BlockReduce;
__shared__ typename MaxValBlockReduce::TempStorage max_val_block_temp_storage;
__shared__ typename BlockReduce::TempStorage block_temp_storage;
__shared__ typename hipcub::WarpReduce<float>::TempStorage temp_storage[warp_num];
const int tid = threadIdx.x;
const int bid = blockIdx.x / head_num;
const int head_id = blockIdx.x % head_num;
int length = __ldg(&length_per_sample[bid]);
const int lane_id = tid % WARP_SIZE;
int qkv_id = bid * head_num * size_per_head + head_id * size_per_head;
int qkv_bias_id = head_id * size_per_head;
  int key_value_id = bid * (seq_len * head_num * size_per_head) +
                     head_id * size_per_head;
query_buf = &query_buf[qkv_id];
K_bias = &K_bias[qkv_bias_id];
key_cache = &key_cache[key_value_id];
Q_bias = &Q_bias[qkv_bias_id];
V_bias = &V_bias[qkv_bias_id];
value_cache = &value_cache[key_value_id];
context_buf = &context_buf[qkv_id];
Access_t bias_r, key_val_r, query_buf_r;
// each warp will have its own copy of sq
query_buf_r.v = *((copy_t *)query_buf + lane_id);
bias_r.v = *((copy_t *)Q_bias + lane_id);
float qb_r[elems_per_thread];
for (int i = 0; i < elems_per_thread; ++i)
{
qb_r[i] = (float)query_buf_r.x[i] + (float)bias_r.x[i];
}
//offset for each step
int offset = head_num * size_per_head;
bias_r.v = *((copy_t *) K_bias + lane_id);
for(int ite = warp_id; ite < length; ite += warp_num)
{
key_val_r.v = *((copy_t *)&key_cache[ite * offset] + lane_id);
//For the first step, we should add bias to key memory cache.
//The KV memory cache only need to be updated at the first step.
if (step == 1)
{
for (int i = 0; i < elems_per_thread; i++)
{
key_val_r.x[i] = (float)key_val_r.x[i] + (float)bias_r.x[i];
}
*((copy_t *)&key_cache[ite * offset] + lane_id) = key_val_r.v;
}
float val = 0.f;
for (int i = 0; i < elems_per_thread; i++)
{
val = val + (float)key_val_r.x[i] * qb_r[i] * scalar;
}
float qk = hipcub::WarpReduce<float>(temp_storage[warp_id]).Sum(val);
if (lane_id == 0)
{
logits[ite] = qk;
}
}
__syncthreads();
__shared__ float s_max_val, s_sum;
float local_i = -1e20f;
for(int i = tid; i < length; i += blockDim.x)
local_i = max(local_i, logits[i]);
float max_val = MaxValBlockReduce(max_val_block_temp_storage).Reduce(local_i, hipcub::Max());
if(tid == 0)
s_max_val = max_val;
__syncthreads();
float local_o = 0.0f;
for(int i = tid; i < length; i += blockDim.x)
{
logits[i] = __expf(logits[i] - s_max_val);
local_o += logits[i];
}
float val = BlockReduce(block_temp_storage).Sum(local_o);
if(tid == 0)
s_sum = val + 1e-6;
__syncthreads();
float s_sum_inverse = __fdividef(1.0f, s_sum);
for(int i = tid; i < length; i += blockDim.x)
{
logits[i] = logits[i] * s_sum_inverse;
}
__syncthreads();
// This optimization introduces discrepancy because of different order in FP32 summation
float sum_r[elems_per_thread] = {0.f};
bias_r.v = *((copy_t *) V_bias + lane_id);
for(int ite = warp_id; ite < length; ite += warp_num)
{
key_val_r.v = *((copy_t *)&value_cache[ite * offset] + lane_id);
//For the first step, we should add bias to key memory cache.
if(step == 1)
{
for (int i = 0; i < elems_per_thread; i++)
{
key_val_r.x[i] = (float)key_val_r.x[i] + (float)bias_r.x[i];
}
*((copy_t *)&value_cache[ite * offset] + lane_id) = key_val_r.v;
}
for (int i = 0; i < elems_per_thread; ++i)
{
sum_r[i] += (float)key_val_r.x[i] * logits[ite];
}
}
for (int i = 0; i < elems_per_thread; i++)
{
sq[warp_id * WARP_SIZE + lane_id].x[i] = sum_r[i];
}
__syncthreads();
if (threadIdx.x < WARP_SIZE)
{
#pragma unroll
for (int j = 1; j < warp_num; j++)
{
for (int i = 0; i < elems_per_thread; ++i)
{
sum_r[i] = sum_r[i] + (float)sq[j * WARP_SIZE + threadIdx.x].x[i];
}
}
}
__syncthreads();
#pragma unroll
for (int i = 0; i < elems_per_thread; i++)
{
key_val_r.x[i] = sum_r[i];
}
if (threadIdx.x < WARP_SIZE)
{
*((copy_t *)context_buf + lane_id) = key_val_r.v;
}
}
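// Note on the step == 1 special case above: in cross attention the encoder
// K/V never change across decode steps, so the biases are folded into
// key_cache / value_cache exactly once, on the first step; every later step
// reads the already-biased cache directly.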
template<typename T>
__global__
void cross_attention_kernel(
T* query_buf, const T* Q_bias,
T* key_cache, const T* K_bias,
T* value_cache, const T* V_bias,
const int* length_per_sample, T* context_buf,
const bool* finished,
int batch_size, int head_num, int size_per_head, int step, const int seq_len, const T scalar)
{
if(finished != nullptr && finished[blockIdx.x / head_num] == true) return;
int tid = threadIdx.x;
int bid = blockIdx.x / head_num;
int head_id = blockIdx.x % head_num;
extern __shared__ __align__(sizeof(T)) unsigned s_buf[];
T* sq = reinterpret_cast<T *>(s_buf);
T* logits = reinterpret_cast<T *>(&sq[size_per_head]);
int length = __ldg(&length_per_sample[bid]);
int qkv_id = bid * head_num * size_per_head + head_id * size_per_head + tid;
int qkv_bias_id = head_id * size_per_head + tid;
if(tid < size_per_head)
sq[tid] = query_buf[qkv_id] + Q_bias[qkv_bias_id];
__syncthreads();
for(int ite = 0; ite < length; ++ite)
{
int key_id = bid * (seq_len * head_num * size_per_head) + ite * (head_num * size_per_head)
+ head_id * size_per_head + tid;
T key = tid < size_per_head ? key_cache[key_id] : (T)(0.0f);
//For the first step, we should add bias to key memory cache.
//The KV memory cache only need to be updated at the first step.
if(step == 1 && tid < size_per_head)
{
key += K_bias[head_id * size_per_head + tid];
key_cache[key_id] = key;
}
T val = (tid < size_per_head) ? key * sq[tid] * scalar : (T)(0.0f);
T qk = blockReduceSum(val);
if(threadIdx.x == 0)
logits[ite] = qk;
__syncthreads(); //try to remove
}
__syncthreads();
__shared__ float s_max_val, s_sum;
float local_i = tid < length ? (float)logits[tid] : -1e20f;
float max_val = blockReduceMax<float>(local_i);
if(tid == 0)
s_max_val = max_val;
__syncthreads();
local_i -= s_max_val;
float local_o = tid < length ? __expf(local_i) : 0.0f;
float val = blockReduceSum<float>(local_o);
if(tid == 0)
s_sum = val + 1e-6;
__syncthreads();
if(tid < length)
logits[tid] = local_o / s_sum;
__syncthreads();
if(tid < size_per_head)
{
T sum = (T)0.0f;
for(int ite = 0; ite < length; ++ite)
{
int value_id = bid * seq_len * head_num * size_per_head + ite * head_num * size_per_head
+ head_id * size_per_head + tid;
T value = value_cache[value_id];
//for the first step, we should add bias to key memory cache
if(step == 1)
{
value += V_bias[head_id * size_per_head + tid];
value_cache[value_id] = value;
}
sum += value * logits[ite];
}
context_buf[bid * head_num * size_per_head + head_id * size_per_head + tid] = sum;
}
}
template <typename T>
void cross_attention_dispatch(T* query_buf, const T* Q_bias,
T* key_cache, const T* K_bias, T* value_cache, const T* V_bias, const int* length,
T* context_buf, const bool* finished,
int batch_size, int head_num, int size_per_head, int step, int seq_len, hipStream_t stream)
{
const int block_sz = ATTENTION_BLOCK_SIZE;
float scalar = 1.f / sqrtf(size_per_head * 1.0f);
dim3 grid(batch_size * head_num);
int cond = size_per_head * ((ATTENION_OPT)? 1:0);
switch (cond)
{
case 32:
hipLaunchKernelGGL(( cross_attention_kernel_opt<T, 32, block_sz>), dim3(grid), dim3(block_sz), sizeof(float)*seq_len, stream,
query_buf, Q_bias, key_cache, K_bias, value_cache, V_bias, length, context_buf, finished,
batch_size, head_num, step, seq_len, scalar);
break;
case 64:
hipLaunchKernelGGL(( cross_attention_kernel_opt<T, 64, block_sz>), dim3(grid), dim3(block_sz), sizeof(float)*seq_len, stream,
query_buf, Q_bias, key_cache, K_bias, value_cache, V_bias, length, context_buf, finished,
batch_size, head_num, step, seq_len, scalar);
break;
case 128:
hipLaunchKernelGGL(( cross_attention_kernel_opt<T, 128, block_sz>), dim3(grid), dim3(block_sz), sizeof(float)*seq_len, stream,
query_buf, Q_bias, key_cache, K_bias, value_cache, V_bias, length, context_buf, finished,
batch_size, head_num, step, seq_len, scalar);
break;
default:
// default path
int block_size = 128;
if(seq_len <= 64)
block_size = 64;
else if(seq_len <= 128 && seq_len > size_per_head)
block_size = 128;
else if(seq_len > 128 && seq_len <= 256)
block_size = 256;
else if(seq_len > 256 && seq_len <= 512)
block_size = 512;
else
block_size = 1024;
if(block_size < size_per_head)
block_size = size_per_head;
assert(block_size <= 1024);
dim3 block(block_size);
int shared_size = sizeof(T) * (size_per_head + seq_len);
hipLaunchKernelGGL(( cross_attention_kernel<T>), dim3(grid), dim3(block), shared_size, stream,
query_buf, Q_bias,
key_cache, K_bias,
value_cache, V_bias,
length, context_buf, finished,
batch_size,
head_num, size_per_head, step, seq_len, scalar);
}
}
template void cross_attention_dispatch(
float* query_buf,
const float* Q_bias,
float* key_cache,
const float* K_bias,
float* value_cache,
const float* V_bias,
const int* length,
float* context_buf,
const bool* finished,
int batch_size,
int head_num,
int size_per_head,
int step,
int seq_len,
hipStream_t stream);
template void cross_attention_dispatch(
half* query_buf,
const half* Q_bias,
half* key_cache,
const half* K_bias,
half* value_cache,
const half* V_bias,
const int* length,
half* context_buf,
const bool* finished,
int batch_size,
int head_num,
int size_per_head,
int step,
int seq_len,
hipStream_t stream);
template void fusedQKV_masked_attention_kernelLauncher(
const float* qkv_buf,
const float* qkv_bias,
float* k_cache,
float* v_cache,
float* output,
const int batch_size,
const int seq_len,
const int head_num,
const int size_per_head,
const int max_seq_len,
hipStream_t stream);
template void fusedQKV_masked_attention_kernelLauncher(
const half* qkv_buf,
const half* qkv_bias,
half* k_cache,
half* v_cache,
half* output,
const int batch_size,
const int seq_len,
const int head_num,
const int size_per_head,
const int max_seq_len,
hipStream_t stream);
}//namespace fastertransformer
|
f8674576a4415501be4b4e51e11cc580b9a39cea.cu
|
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Open sourced multi-head attention
**/
#include <type_traits>
#include <stdint.h>
#include "fastertransformer/open_decoder.h"
#include "cub/cub.cuh"
#include "fastertransformer/utils/nvtx_utils.h"
#include "masked_multihead_attention.h"
namespace fastertransformer{
const int WARP_SIZE = 32;
const bool ATTENION_OPT = true;
const int ATTENTION_BLOCK_SIZE = 256;
///////////////////////////////////////////////////////////////////////////////////////////////////
template <int HALF_ELEMENTS_PER_WARP_LOAD>
using Copy_half_t =
typename std::conditional<HALF_ELEMENTS_PER_WARP_LOAD == 32, half,
typename std::conditional<HALF_ELEMENTS_PER_WARP_LOAD == 64, int,
typename std::conditional<HALF_ELEMENTS_PER_WARP_LOAD == 128, int2, int4
>::type
>::type
>::type;
template <typename T, int ELEMENTS_PER_WARP_LOAD>
using Copy_t = Copy_half_t<sizeof(T) / sizeof(half) * ELEMENTS_PER_WARP_LOAD>;
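// Worked example (a sketch of the mapping above): with T = float and
// ELEMENTS_PER_WARP_LOAD = 64 this resolves to Copy_half_t<128> = int2
// (8 bytes), so each of the 32 lanes in a warp moves two floats per load and
// the warp covers all 64 elements in a single pass.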
///////////////////////////////////////////////////////////////////////////////////////////////////
/**
masked multi-head attention
*/
#define FINAL_MASK 0xffffffff
template <typename T>
__inline__ __device__
T warpReduceSum(T val)
{
for(int mask = 16; mask > 0; mask >>= 1)
val += __shfl_xor_sync(FINAL_MASK, val, mask, 32);
return val;
}
/* Calculate the sum of all elements in a block */
template <typename T>
__inline__ __device__
T blockReduceSum(T val)
{
static __shared__ T shared[32];
int lane = threadIdx.x & 0x1f;
int wid = threadIdx.x >> 5;
val = warpReduceSum<T>(val);
if(lane == 0)
shared[wid] = val;
__syncthreads();
val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : (T)(0.0f);
val = warpReduceSum<T>(val);
return val;
}
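// Sketch of the two-stage reduction above, assuming blockDim.x == 256: each of
// the 8 warps reduces its 32 values via xor-shuffles, lane 0 of each warp
// stores its partial into shared[0..7], and the first warp then reduces those
// partials (its threads 8..31 contribute 0), so every lane of warp 0 ends up
// holding the block-wide sum.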
template <typename T>
__inline__ __device__
T warpReduceMax(T val)
{
for(int mask = 16; mask > 0; mask >>= 1)
val = max(val, __shfl_xor_sync(FINAL_MASK, val, mask, 32));
return val;
}
/* Calculate the maximum of all elements in a block */
template <typename T>
__inline__ __device__
T blockReduceMax(T val)
{
static __shared__ T shared[32];
int lane = threadIdx.x & 0x1f; // in-warp idx
int wid = threadIdx.x >> 5; // warp idx
val = warpReduceMax(val); // max within each warp
if(lane == 0) // lane 0 records its warp's max, indexed by warp id
shared[wid] = val;
__syncthreads();
val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : (T)-1e20f;
val = warpReduceMax(val);
return val;
}
template <int size_per_head, int block_sz, typename T>
__global__
void masked_attention_kernel_opt(
T* __restrict key_buf, T* __restrict value_buf,
T* __restrict query_buf, const T* __restrict self_Q_bias,
T* __restrict key_cache, const T* __restrict self_K_bias,
T* __restrict value_cache, const T* __restrict self_V_bias,
T* __restrict context_buf, const bool* finished,
int batch_size, int head_num, const int step, const T scalar)
{
if(finished != nullptr && finished[blockIdx.x / head_num] == true) return;
typedef Copy_t<T, size_per_head> copy_t;
const int elems_per_thread = size_per_head / WARP_SIZE;
union Access_t
{
copy_t v;
T x[elems_per_thread]; // supported size 1,2,4
};
typedef struct Float_n_t
{
T x[elems_per_thread]; // supported size 1,2,4
} float_n_t;
__shared__ float_n_t sq[block_sz];
extern __shared__ float logits[]; // used to store the logits for steps [0, step)
const int tid = threadIdx.x;
const int warp_num = block_sz / WARP_SIZE;
const int bid = blockIdx.x;
const int head_id = blockIdx.x % head_num;
const int warp_id = tid / WARP_SIZE; // warp_id in block
const int lane_id = tid % WARP_SIZE; // lane_id in warp
typedef cub::BlockReduce<float, block_sz> MaxValBlockReduce;
typedef cub::BlockReduce<float, block_sz> BlockReduce;
__shared__ typename MaxValBlockReduce::TempStorage max_val_block_temp_storage;
__shared__ typename BlockReduce::TempStorage block_temp_storage;
__shared__ typename cub::WarpReduce<float>::TempStorage temp_storage[warp_num];
int qkv_id = bid * size_per_head;
int qkv_bias_id = head_id * size_per_head;
query_buf = &query_buf[qkv_id];
key_buf = &key_buf[qkv_id];
value_buf = &value_buf[qkv_id];
self_K_bias = &self_K_bias[qkv_bias_id];
key_cache = &key_cache[qkv_id];
self_Q_bias = &self_Q_bias[qkv_bias_id];
self_V_bias = &self_V_bias[qkv_bias_id];
value_cache = &value_cache[qkv_id];
context_buf = &context_buf[qkv_id];
Access_t bias_r, query_buf_r;
Access_t key_val_r, key_buf_r;
Access_t value_val_r, value_buf_r;
// each warp will have its own copy of sq
query_buf_r.v = *((copy_t *)query_buf + lane_id);
key_buf_r.v = *((copy_t *)key_buf + lane_id);
bias_r.v = *((copy_t *)self_Q_bias + lane_id);
float qb_r[elems_per_thread];
for (int i = 0; i < elems_per_thread; ++i)
{
qb_r[i] = (float)query_buf_r.x[i] + (float)bias_r.x[i];
}
//offset for each step
int offset = batch_size * head_num * size_per_head;
bias_r.v = *((copy_t *) self_K_bias + lane_id);
for(int ite = warp_id; ite < step; ite += warp_num)
{
key_val_r.v = *((copy_t *)&key_cache[ite * offset] + lane_id);
//for the last step, write K + bias_K back into the cache
if(ite == step - 1)
{
for (int i = 0; i < elems_per_thread; i++)
{
key_val_r.x[i] = (float)key_buf_r.x[i] + (float)bias_r.x[i];
}
*((copy_t *)&key_cache[ite * offset] + lane_id) = key_val_r.v;
}
float val = 0.f;
for (int i = 0; i < elems_per_thread; i++)
{
val = val + (float)key_val_r.x[i] * qb_r[i] * (float)scalar;
}
float qk = cub::WarpReduce<float>(temp_storage[warp_id]).Sum(val);
if (lane_id == 0)
{
logits[ite] = qk;
}
}
__syncthreads();
__shared__ float s_max_val, s_sum;
float local_i = -1e20f;
for(int i = tid; i < step; i += blockDim.x)
local_i = max(local_i, logits[i]);
float max_val = MaxValBlockReduce(max_val_block_temp_storage).Reduce(local_i, cub::Max());
if(tid == 0)
s_max_val = max_val;
__syncthreads();
float local_o = 0.0f;
for(int i = tid; i < step; i += blockDim.x)
{
logits[i] = __expf(logits[i] - s_max_val);
local_o += logits[i];
}
float val = BlockReduce(block_temp_storage).Sum(local_o);
if(tid == 0)
s_sum = val + 1e-6;
__syncthreads();
float s_sum_inverse = __fdividef(1.0f, s_sum);
for(int i = tid; i < step; i += blockDim.x)
{
logits[i] = logits[i] * s_sum_inverse;
}
__syncthreads();
// This optimization introduces a discrepancy because FP32 summation happens in a different order
float sum_r[elems_per_thread] = {0.f};
bias_r.v = *((copy_t *) self_V_bias + lane_id);
value_buf_r.v = *((copy_t *)value_buf + lane_id);
for(int ite = warp_id; ite < step; ite += warp_num)
{
value_val_r.v = *((copy_t *)&value_cache[ite * offset] + lane_id);
//for the last step, write V + bias_V back into the cache
if(ite == step - 1)
{
for (int i = 0; i < elems_per_thread; i++)
{
value_val_r.x[i] = (float)value_buf_r.x[i] + (float)bias_r.x[i];
}
*((copy_t *)&value_cache[ite * offset] + lane_id) = value_val_r.v;
}
for (int i = 0; i < elems_per_thread; ++i)
{
sum_r[i] += (float)value_val_r.x[i] * logits[ite];
}
}
for (int i = 0; i < elems_per_thread; i++)
{
sq[warp_id * WARP_SIZE + lane_id].x[i] = sum_r[i];
}
__syncthreads();
if (warp_id == 0)
{
#pragma unroll
for (int j = 1; j < warp_num; j++)
{
for (int i = 0; i < elems_per_thread; ++i)
{
sum_r[i] = sum_r[i] + (float)sq[j * WARP_SIZE + tid].x[i];
}
}
}
__syncthreads();
#pragma unroll
for (int i = 0; i < elems_per_thread; i++)
{
value_val_r.x[i] = sum_r[i];
}
if (warp_id == 0)
{
*((copy_t *)context_buf + lane_id) = value_val_r.v;
}
}
template <typename T>
__global__
void masked_attention_kernel(
T* key_buf, T* value_buf,
T* query_buf, const T* self_Q_bias,
T* key_cache, const T* self_K_bias, T* value_cache, const T* self_V_bias,
T* context_buf, const bool* finished,
int batch_size, int head_num, int size_per_head, const int step, const T scalar)
{
if(finished != nullptr && finished[blockIdx.x / head_num] == true) return;
extern __shared__ __align__(sizeof(T)) unsigned s_buf[];
T* sq = reinterpret_cast<T *>(s_buf);
T* logits = reinterpret_cast<T *>(&sq[size_per_head]);
int tid = threadIdx.x;
int bid = blockIdx.x / head_num;
int head_id = blockIdx.x % head_num;
int qkv_id = bid * head_num * size_per_head + head_id * size_per_head + tid;
int qkv_bias_id = head_id * size_per_head + tid;
if(tid < size_per_head)
sq[tid] = query_buf[qkv_id] + self_Q_bias[qkv_bias_id];
__syncthreads();
//offset for each step
int offset = batch_size * head_num * size_per_head;
for(int ite = 0; ite < step; ++ite)
{
T key = tid < size_per_head ? key_cache[ite * offset + qkv_id] : (T)0.0f;
//for the last step, write K + bias_K back into the cache
if(ite == step - 1 && tid < size_per_head)
{
key = key_buf[qkv_id] + self_K_bias[qkv_bias_id];
key_cache[ite * offset + qkv_id] = key;
}
T val = (tid < size_per_head) ? key * sq[tid] * scalar : (T)(0.0f);
T qk = blockReduceSum(val);
if(threadIdx.x == 0)
logits[ite] = qk;
__syncthreads(); // TODO: check whether this barrier can be removed
}
__syncthreads(); // TODO: check whether this barrier can be removed
__shared__ float s_max_val, s_sum;
float local_i = tid < step ? (float)logits[tid] : -1e20f;
float max_val = blockReduceMax<float>(local_i);
if(tid == 0)
s_max_val = max_val;
__syncthreads();
local_i -= s_max_val;
float local_o = tid < step ? __expf(local_i) : 0.0f;
float val = blockReduceSum<float>(local_o);
if(tid == 0)
s_sum = val + 1e-6;
__syncthreads();
if(tid < step)
logits[tid] = local_o / s_sum;
__syncthreads();
if(tid < size_per_head)
{
T sum = (T)0.0f;
for(int ite = 0; ite < step; ++ite)
{
T value = value_cache[ite * offset + qkv_id];
//for the last step, write V + bias_V back into the cache
if(ite == step - 1)
{
value = value_buf[qkv_id] + self_V_bias[qkv_bias_id];
value_cache[ite * offset + qkv_id] = value;
}
sum += value * logits[ite];
}
context_buf[qkv_id] = sum;
}
}
template <typename T>
void masked_attention_dispatch(
T* key_buf, T* value_buf,
T* query_buf, const T* self_Q_bias,
T* key_cache, const T* self_K_bias, T* value_cache, const T* self_V_bias,
T* context_buf, const bool* finished, int max_batch_size, int inference_batch_size,
int head_num, int size_per_head, const int step, const int max_seq_len, cudaStream_t stream)
{
if (max_seq_len < 0) {
const int block_sz = ATTENTION_BLOCK_SIZE;
T scalar = (T)(1.f / sqrtf(size_per_head * 1.0f));
dim3 grid(inference_batch_size * head_num);
int cond = size_per_head * ((ATTENION_OPT)? 1:0);
switch (cond)
{
case 32:
masked_attention_kernel_opt<32, block_sz, T><<<grid, block_sz, sizeof(float)*step, stream>>>(
key_buf, value_buf,
query_buf, self_Q_bias, key_cache, self_K_bias, value_cache, self_V_bias, context_buf, finished,
max_batch_size, head_num, step, scalar);
break;
case 64:
masked_attention_kernel_opt<64, block_sz, T><<<grid, block_sz, sizeof(float)*step, stream>>>(
key_buf, value_buf,
query_buf, self_Q_bias,
key_cache, self_K_bias,
value_cache, self_V_bias,
context_buf,
finished,
max_batch_size, head_num, step, scalar);
break;
case 128:
masked_attention_kernel_opt<128, block_sz, T><<<grid, block_sz, sizeof(float)*step, stream>>>(
key_buf, value_buf,
query_buf, self_Q_bias, key_cache, self_K_bias, value_cache, self_V_bias, context_buf, finished,
max_batch_size, head_num, step, scalar);
break;
default:
// default path
int block_size = 128;
//assumes size_per_head <= 128
if(step <= 64)
block_size = 64;
else if(step <= 128 && step > size_per_head)
block_size = 128;
else if(step > 128 && step <= 256)
block_size = 256;
else if(step > 256 && step <= 512)
block_size = 512;
else
block_size = 1024;
if((int)block_size < size_per_head)
block_size = size_per_head;
assert(block_size <= 1024);
dim3 block(block_size);
T scalar = 1 / sqrtf(size_per_head * 1.0f);
int shared_size = sizeof(T) * (size_per_head + step);
masked_attention_kernel<T><<<grid, block, shared_size, stream>>>(
key_buf, value_buf,
query_buf, self_Q_bias,
key_cache, self_K_bias,
value_cache, self_V_bias,
context_buf, finished, max_batch_size,
head_num, size_per_head, step, scalar);
}
}
else {
assert(step > 0);
assert(size_per_head == 32 || size_per_head == 64 || size_per_head == 128);
using DataType = typename std::conditional<sizeof(T) == 4, float, uint16_t>::type;
// Prepare the parameters.
Masked_multihead_attention_params<DataType> params;
memset(¶ms, 0, sizeof(params));
params.q_bias = reinterpret_cast<const DataType *>(self_Q_bias);
params.k_bias = reinterpret_cast<const DataType *>(self_K_bias);
params.v_bias = reinterpret_cast<const DataType *>(self_V_bias);
// Set the output buffer.
params.out = reinterpret_cast<DataType *>(context_buf);
// Set the input buffers.
params.q = reinterpret_cast<const DataType *>(query_buf);
params.k = reinterpret_cast<const DataType *>(key_buf);
params.v = reinterpret_cast<const DataType *>(value_buf);
params.stride = 0;
params.finished = const_cast<bool*>(finished);
params.k_cache = reinterpret_cast<DataType *>(key_cache);
params.v_cache = reinterpret_cast<DataType *>(value_cache);
params.batch_size = inference_batch_size;
params.seq_length = max_seq_len;
params.timestep = step-1;
params.num_heads = head_num;
params.hidden_size_per_head = size_per_head;
params.inv_sqrt_dh = 1.F / sqrtf((float) params.hidden_size_per_head);
masked_multihead_attention(params, stream);
}
}
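// Usage sketch (illustrative only; the d_* pointers are hypothetical device
// buffers of shape [batch * head_num * size_per_head], with the caches sized
// for all decoded steps):
//   masked_attention_dispatch<float>(d_key, d_value, d_query, d_q_bias,
//       d_k_cache, d_k_bias, d_v_cache, d_v_bias, d_ctx, /*finished=*/nullptr,
//       batch, batch, head_num, size_per_head, step, /*max_seq_len=*/-1, stream);
// A negative max_seq_len selects the kernels defined above; a non-negative one
// routes to the external masked_multihead_attention implementation instead.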
template void masked_attention_dispatch(
float* key_buf,
float* value_buf,
float* query_buf,
const float* self_Q_bias,
float* key_cache,
const float* self_K_bias,
float* value_cache,
const float* self_V_bias,
float* context_buf,
const bool* finished,
int max_batch_size,
int inference_batch_size,
int head_num,
int size_per_head,
const int step,
const int max_seq_size,
cudaStream_t stream);
template void masked_attention_dispatch(
half* key_buf,
half* value_buf,
half* query_buf,
const half* self_Q_bias,
half* key_cache,
const half* self_K_bias,
half* value_cache,
const half* self_V_bias,
half* context_buf,
const bool* finished,
int max_batch_size,
int inference_batch_size,
int head_num,
int size_per_head,
const int step,
const int max_seq_size,
cudaStream_t stream);
template <int size_per_head, int block_sz, typename T>
__global__
void fusedQKV_masked_attention_kernel_opt(
const T* __restrict qkv_buf, const T* __restrict qkv_bias,
T* __restrict key_cache,
T* __restrict value_cache,
T* __restrict context_buf, const bool* finished, int batch_size, int head_num, const int step, const T scalar)
{
if(finished != nullptr && finished[blockIdx.x / head_num] == true) return;
typedef Copy_t<T, size_per_head> copy_t;
const int elems_per_thread = size_per_head / WARP_SIZE;
union Access_t
{
copy_t v;
T x[elems_per_thread]; // supported size 1,2,4
};
typedef struct Float_n_t
{
T x[elems_per_thread]; // supported size 1,2,4
} float_n_t;
__shared__ float_n_t sq[block_sz];
extern __shared__ float logits[]; // used to store the logits for steps [0, step)
const int tid = threadIdx.x;
const int warp_num = block_sz / WARP_SIZE;
const int bid = blockIdx.x;
const int head_id = blockIdx.x % head_num;
const int warp_id = tid / WARP_SIZE; // warp_id in block
const int lane_id = tid % WARP_SIZE; // lane_id in warp
const int batch_id = bid / head_num;
const int hidden_units = head_num * size_per_head;
typedef cub::BlockReduce<float, block_sz> MaxValBlockReduce;
typedef cub::BlockReduce<float, block_sz> BlockReduce;
__shared__ typename MaxValBlockReduce::TempStorage max_val_block_temp_storage;
__shared__ typename BlockReduce::TempStorage block_temp_storage;
__shared__ typename cub::WarpReduce<float>::TempStorage temp_storage[warp_num];
int qkv_id = batch_id * 3 * hidden_units + head_id * size_per_head;
int qkv_bias_id = head_id * size_per_head;
int cache_qkv_id = bid * size_per_head;
const T* query_buf = qkv_buf + qkv_id;
const T* key_buf = qkv_buf + hidden_units + qkv_id;
const T* value_buf = qkv_buf + 2 * hidden_units + qkv_id;
const T* self_Q_bias = qkv_bias + qkv_bias_id;
const T* self_K_bias = qkv_bias + hidden_units + qkv_bias_id;
const T* self_V_bias = qkv_bias + 2 * hidden_units + qkv_bias_id;
value_cache = value_cache + cache_qkv_id;
key_cache = key_cache + cache_qkv_id;
context_buf = context_buf + cache_qkv_id;
Access_t bias_r, query_buf_r;
Access_t key_val_r, key_buf_r;
Access_t value_val_r, value_buf_r;
// each warp will have its own copy of sq
query_buf_r.v = *((copy_t *)query_buf + lane_id);
key_buf_r.v = *((copy_t *)key_buf + lane_id);
bias_r.v = *((copy_t *)self_Q_bias + lane_id);
float qb_r[elems_per_thread];
for (int i = 0; i < elems_per_thread; ++i)
{
qb_r[i] = (float)query_buf_r.x[i] + (float)bias_r.x[i];
}
//offset for each step
int offset = batch_size * hidden_units;
bias_r.v = *((copy_t *) self_K_bias + lane_id);
for(int ite = warp_id; ite < step; ite += warp_num)
{
key_val_r.v = *((copy_t *)&key_cache[ite * offset] + lane_id);
//for the last step, write K + bias_K back into the cache
if(ite == step - 1)
{
for (int i = 0; i < elems_per_thread; i++)
{
key_val_r.x[i] = (float)key_buf_r.x[i] + (float)bias_r.x[i];
}
*((copy_t *)&key_cache[ite * offset] + lane_id) = key_val_r.v;
}
float val = 0.f;
for (int i = 0; i < elems_per_thread; i++)
{
val = val + (float)key_val_r.x[i] * qb_r[i] * (float)scalar;
}
float qk = cub::WarpReduce<float>(temp_storage[warp_id]).Sum(val);
if (lane_id == 0)
{
logits[ite] = qk;
}
}
__syncthreads();
__shared__ float s_max_val, s_sum;
float local_i = -1e20f;
for(int i = tid; i < step; i += blockDim.x)
local_i = max(local_i, logits[i]);
float max_val = MaxValBlockReduce(max_val_block_temp_storage).Reduce(local_i, cub::Max());
if(tid == 0)
s_max_val = max_val;
__syncthreads();
float local_o = 0.0f;
for(int i = tid; i < step; i += blockDim.x)
{
logits[i] = __expf(logits[i] - s_max_val);
local_o += logits[i];
}
float val = BlockReduce(block_temp_storage).Sum(local_o);
if(tid == 0)
s_sum = val + 1e-6;
__syncthreads();
float s_sum_inverse = __fdividef(1.0f, s_sum);
for(int i = tid; i < step; i += blockDim.x)
{
logits[i] = logits[i] * s_sum_inverse;
}
__syncthreads();
// This optimization introduces a discrepancy because FP32 summation happens in a different order
float sum_r[elems_per_thread] = {0.f};
bias_r.v = *((copy_t *) self_V_bias + lane_id);
value_buf_r.v = *((copy_t *)value_buf + lane_id);
for(int ite = warp_id; ite < step; ite += warp_num)
{
value_val_r.v = *((copy_t *)&value_cache[ite * offset] + lane_id);
//for the last step, write V + bias_V back into the cache
if(ite == step - 1)
{
for (int i = 0; i < elems_per_thread; i++)
{
value_val_r.x[i] = (float)value_buf_r.x[i] + (float)bias_r.x[i];
}
*((copy_t *)&value_cache[ite * offset] + lane_id) = value_val_r.v;
}
for (int i = 0; i < elems_per_thread; ++i)
{
sum_r[i] += (float)value_val_r.x[i] * logits[ite];
}
}
for (int i = 0; i < elems_per_thread; i++)
{
sq[warp_id * WARP_SIZE + lane_id].x[i] = sum_r[i];
}
__syncthreads();
if (warp_id == 0)
{
#pragma unroll
for (int j = 1; j < warp_num; j++)
{
for (int i = 0; i < elems_per_thread; ++i)
{
sum_r[i] = sum_r[i] + (float)sq[j * WARP_SIZE + tid].x[i];
}
}
}
__syncthreads();
#pragma unroll
for (int i = 0; i < elems_per_thread; i++)
{
value_val_r.x[i] = sum_r[i];
}
if (warp_id == 0)
{
*((copy_t *)context_buf + lane_id) = value_val_r.v;
}
}
template <typename T>
void fusedQKV_masked_attention_dispatch(
const T* qkv_buf, const T* qkv_bias,
T* key_cache, T* value_cache,
T* context_buf, const bool* finished, int max_batch_size, int inference_batch_size,
int head_num, int size_per_head, const int step, const int max_seq_len, cudaStream_t stream)
{
if (max_seq_len < 0) {
const int block_sz = ATTENTION_BLOCK_SIZE;
T scalar = (T)(1.f / sqrtf(size_per_head * 1.0f));
dim3 grid(inference_batch_size * head_num);
int cond = size_per_head * ((ATTENION_OPT)? 1:0);
switch (cond)
{
case 32:
fusedQKV_masked_attention_kernel_opt<32, block_sz, T><<<grid, block_sz, sizeof(float)*step, stream>>>(
qkv_buf, qkv_bias,
key_cache, value_cache,
context_buf,
finished,
max_batch_size, head_num, step, scalar);
break;
case 64:
fusedQKV_masked_attention_kernel_opt<64, block_sz, T><<<grid, block_sz, sizeof(float)*step, stream>>>(
qkv_buf, qkv_bias,
key_cache,
value_cache,
context_buf,
finished,
max_batch_size, head_num, step, scalar);
break;
case 128:
fusedQKV_masked_attention_kernel_opt<128, block_sz, T><<<grid, block_sz, sizeof(float)*step, stream>>>(
qkv_buf, qkv_bias,
key_cache,
value_cache,
context_buf,
finished,
max_batch_size, head_num, step, scalar);
break;
default:
assert(false);
}
}
else {
using DataType = typename std::conditional<sizeof(T) == 4, float, uint16_t>::type;
// Prepare the parameters.
Masked_multihead_attention_params<DataType> params;
memset(¶ms, 0, sizeof(params));
int hidden_units = head_num * size_per_head;
params.q_bias = reinterpret_cast<const DataType *>(qkv_bias);
params.k_bias = reinterpret_cast<const DataType *>(qkv_bias) + hidden_units;
params.v_bias = reinterpret_cast<const DataType *>(qkv_bias) + 2 * hidden_units;
// Set the output buffer.
params.out = reinterpret_cast<DataType *>(context_buf);
// Set the input buffers.
params.q = reinterpret_cast<const DataType *>(qkv_buf);
params.k = reinterpret_cast<const DataType *>(qkv_buf) + hidden_units;
params.v = reinterpret_cast<const DataType *>(qkv_buf) + 2 * hidden_units;
params.stride = 3 * hidden_units;
params.finished = const_cast<bool*>(finished);
params.k_cache = reinterpret_cast<DataType *>(key_cache);
params.v_cache = reinterpret_cast<DataType *>(value_cache);
params.batch_size = inference_batch_size;
params.seq_length = max_seq_len;
params.timestep = step-1;
params.num_heads = head_num;
params.hidden_size_per_head = size_per_head;
params.inv_sqrt_dh = 1.F / sqrtf((float) params.hidden_size_per_head);
masked_multihead_attention(params, stream);
}
}
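// Note: unlike masked_attention_dispatch above, the max_seq_len < 0 path here
// has no generic fallback kernel -- size_per_head must be 32, 64, or 128, and
// anything else trips the assert(false) in the default branch.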
template void fusedQKV_masked_attention_dispatch(
const float* qkv_buf,
const float* qkv_bias,
float* key_cache,
float* value_cache,
float* context_buf,
const bool* finished,
int max_batch_size,
int inference_batch_size,
int head_num,
int size_per_head,
const int step,
const int max_seq_len,
cudaStream_t stream);
template void fusedQKV_masked_attention_dispatch(
const half* qkv_buf,
const half* qkv_bias,
half* key_cache,
half* value_cache,
half* context_buf,
const bool* finished,
int max_batch_size,
int inference_batch_size,
int head_num,
int size_per_head,
const int step,
const int max_seq_len,
cudaStream_t stream);
template <typename T>
void fusedQKV_masked_attention_kernelLauncher(
const T* qkv_buf,
const T* qkv_bias,
T* k_cache,
T* v_cache,
T* output,
const int batch_size,
const int seq_len,
const int head_num,
const int size_per_head,
const int max_seq_len,
cudaStream_t stream)
{
fusedQKV_masked_attention_dispatch(qkv_buf,
qkv_bias,
k_cache,
v_cache,
output,
nullptr,
batch_size,
batch_size,
head_num,
size_per_head,
seq_len,
max_seq_len,
stream);
}
template<typename T>
__global__ void transpose_4d(T* dst, T* src,
const int dim0,
const int dim1,
const int dim2,
const int dim3,
const int dim0_leading_dim,
const int ite)
{
// transpose from [dim0, dim1, dim2, dim3] to [dim2, X, dim1, dim3]
// where the dimension of X is dim0_leading_dim, and offset is ite * dim0
for(int i = threadIdx.x + blockIdx.x * blockDim.x; i < dim0 * dim1 * dim2 * dim3; i+= blockDim.x * gridDim.x)
{
int index = i;
const int d3 = index % dim3;
index = (index - d3) / dim3;
const int d2 = index % dim2;
index = (index - d2) / dim2;
const int d1 = index % dim1;
index = (index - d1) / dim1;
const int d0 = index % dim0;
index = (index - d0) / dim0;
dst[d2 * dim0_leading_dim * dim1 * dim3 + (d0 + dim0 * ite) * dim1 * dim3 + d1 * dim3 + d3] = src[i];
}
}
template<>
__global__ void transpose_4d(half* dst, half* src,
const int dim0,
const int dim1,
const int dim2,
const int dim3,
const int dim0_leading_dim,
const int ite)
{
half2 *dst_ptr = (half2 *) dst;
half2 *src_ptr = (half2 *) src;
const int half_dim3 = dim3 / 2;
// transpose from [dim0, dim1, dim2, half_dim3] to [dim2, X, dim1, half_dim3]
// where the dimension of X is dim0_leading_dim, and the offset is ite * dim0
for(int i = threadIdx.x + blockIdx.x * blockDim.x; i < dim0 * dim1 * dim2 * half_dim3; i+= blockDim.x * gridDim.x)
{
int index = i;
const int d3 = index % half_dim3;
index = (index - d3) / half_dim3;
const int d2 = index % dim2;
index = (index - d2) / dim2;
const int d1 = index % dim1;
index = (index - d1) / dim1;
const int d0 = index % dim0;
index = (index - d0) / dim0;
dst_ptr[d2 * dim0_leading_dim * dim1 * half_dim3 + (d0 + dim0 * ite) * dim1 * half_dim3 + d1 * half_dim3 + d3] = src_ptr[i];
}
}
template<typename T>
void transpose_4d_kernelLauncher(T* dst, T* src,
const int local_batch_size,
const int seq_len,
const int size_per_head,
const int local_hidden_units,
const int local_head_num,
const int batch_size,
const int ite,
cudaStream_t stream)
{
transpose_4d<<<local_batch_size * seq_len * local_hidden_units / 512, 512 / (4 / (sizeof(T))), 0, stream>>>(
dst, src,
local_batch_size, local_head_num,
seq_len, size_per_head, batch_size, ite);
}
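// Launch-geometry example for the call above (a sketch; it assumes the element
// count is divisible by 512): for T = float, 512 / (4 / 4) = 512 threads each
// move one float; for T = half, 512 / (4 / 2) = 256 threads each move a half2,
// so a block covers 512 scalar elements either way.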
template void transpose_4d_kernelLauncher(
float* dst,
float* src,
const int local_batch_size,
const int seq_len,
const int size_per_head,
const int local_hidden_units,
const int local_head_num,
const int batch_size,
const int ite,
cudaStream_t stream);
template void transpose_4d_kernelLauncher(
half* dst,
half* src,
const int local_batch_size,
const int seq_len,
const int size_per_head,
const int local_hidden_units,
const int local_head_num,
const int batch_size,
const int ite,
cudaStream_t stream);
#define NEW_TRANSPOSE_BATCH_MAJOR 1
template<typename T>
__global__ void transpose_4d_batch_major_k_cache(T* k_dst, const T* k_src,
const int head_num,
const int size_per_head,
const int seq_len,
const int max_seq_len)
{
const int batch_id = blockIdx.y;
const int head_id = blockIdx.z;
constexpr int X_ELEMS = (sizeof(T) == 4)? 4 : 8;
auto key_src = reinterpret_cast<const uint4*>(k_src + batch_id * head_num * size_per_head * seq_len + head_id * size_per_head * seq_len);
auto key_dst = reinterpret_cast<uint4*>(k_dst + batch_id * head_num * size_per_head * max_seq_len + head_id * size_per_head * max_seq_len);
const int out_idx = blockIdx.x * blockDim.x + threadIdx.x;
int size_per_head_div_x = size_per_head / X_ELEMS;
if (out_idx >= head_num * size_per_head_div_x * max_seq_len) return;
int idx = out_idx;
const int k_seq_len_id = idx % max_seq_len;
idx = (idx - k_seq_len_id) / max_seq_len;
const int k_head_size_id = idx % size_per_head_div_x;
if (k_seq_len_id < seq_len)
key_dst[out_idx] = key_src[k_seq_len_id * size_per_head_div_x + k_head_size_id];
}
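// In effect the kernel above repacks the K cache from [B, H, L, Dh] into
// [B, H, Dh/x, max_L, x] using 16-byte (uint4) loads, where x is 4 floats or
// 8 halves; sequence positions beyond seq_len are left untouched as padding.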
template<typename T>
__global__ void transpose_4d_batch_major_v_cache(T* v_dst, const T* v_src,
const int head_num,
const int size_per_head,
const int seq_len,
const int max_seq_len)
{
const int batch_id = blockIdx.y;
const int head_id = blockIdx.z;
// 16 byte loads will handle "x" dimension
auto val_src = reinterpret_cast<const uint4*>(v_src + batch_id * head_num * size_per_head * seq_len + head_id * size_per_head * seq_len);
auto val_dst = reinterpret_cast<uint4*>(v_dst + batch_id * head_num * size_per_head * max_seq_len + head_id * size_per_head * max_seq_len);
// idx is over output dimension L * size_per_head / x for values
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
constexpr int X_ELEMS = (sizeof(T) == 4)? 4 : 8;
const int size_per_head_div_x = size_per_head / X_ELEMS;
if (idx >= size_per_head_div_x * seq_len) return;
val_dst[idx] = val_src[idx];
}
template<typename T>
__global__ void transpose_4d_batch_major(T* k_dst, T* v_dst,
const T* k_src, const T* v_src,
const int head_num,
const int size_per_head,
const int seq_len,
const int max_seq_len)
{
const int hidden_dim = 768; // NOTE: hard-coded; assumes head_num * size_per_head == 768
const int x = (sizeof(T) == 4)? 4 : 8;
const int size_per_head_split = size_per_head / x;
const int batch_id = blockIdx.x;
const int seq_id = blockIdx.y;
for(int id = threadIdx.x; id < head_num * size_per_head_split * x; id += blockDim.x)
{
int tmp_id = id;
int x_id = tmp_id % x;
tmp_id = (tmp_id - x_id) / x;
int size_id = tmp_id % size_per_head_split;
tmp_id = (tmp_id - size_id) / size_per_head_split;
int head_id = tmp_id % head_num;
// key: [B, head_num, L, size_per_head / x, x] -> [B, head_num, size_per_head / x, L, x]
k_dst[batch_id * hidden_dim * max_seq_len + head_id * size_per_head * max_seq_len + size_id * max_seq_len * x + seq_id * x + x_id] =
k_src[batch_id * hidden_dim * seq_len + head_id * size_per_head * seq_len + seq_id * size_per_head + size_id * x + x_id];
// value: [B, head_num, L, size_per_head/x, x] -> [B, head_num, max_L, size_per_head/x, x] (same layout, padded to max_seq_len)
v_dst[batch_id * hidden_dim * max_seq_len + head_id * size_per_head * max_seq_len + seq_id * size_per_head + size_id * x + x_id] =
v_src[batch_id * hidden_dim * seq_len + head_id * size_per_head * seq_len + seq_id * size_per_head + size_id * x + x_id];
}
}
template<typename T>
void transpose_4d_batch_major_kernelLauncher(T* k_dst, T* v_dst,
const T* k_src, const T* v_src,
const int local_batch_size,
const int seq_len,
const int max_seq_len,
const int size_per_head,
const int local_head_num,
cudaStream_t stream)
{
constexpr int block_sz = 128;
#if NEW_TRANSPOSE_BATCH_MAJOR == 1
constexpr int x = (sizeof(T) == 4)? 4 : 8;
int size = max_seq_len * size_per_head / x;
dim3 grid((size + block_sz - 1) / block_sz, local_batch_size, local_head_num);
dim3 grid_v((seq_len * size_per_head / x + block_sz - 1) / block_sz, local_batch_size, local_head_num);
transpose_4d_batch_major_k_cache<<<grid, block_sz, 0, stream>>>(
k_dst, k_src,
local_head_num,
size_per_head,
seq_len,
max_seq_len
);
transpose_4d_batch_major_v_cache<<<grid_v, block_sz, 0, stream>>>(
v_dst, v_src,
local_head_num,
size_per_head,
seq_len,
max_seq_len
);
#else
dim3 grid(local_batch_size, seq_len);
transpose_4d_batch_major<<<grid, block_sz, 0, stream>>>(
k_dst, v_dst,
k_src, v_src,
local_head_num,
size_per_head,
seq_len,
max_seq_len
);
#endif
}
template void transpose_4d_batch_major_kernelLauncher(float* k_dst, float* v_dst,
const float* k_src, const float* v_src,
const int local_batch_size,
const int seq_len,
const int max_seq_len,
const int size_per_head,
const int local_head_num,
cudaStream_t stream);
template void transpose_4d_batch_major_kernelLauncher(half* k_dst, half* v_dst,
const half* k_src, const half* v_src,
const int local_batch_size,
const int seq_len,
const int max_seq_len,
const int size_per_head,
const int local_head_num,
cudaStream_t stream);
template<typename T>
__global__
void add_QKV_bias_generalized_2(const T* __restrict QKV,
const T* __restrict bias,
T* q_buf_, T* k_buf_, T* v_buf_,
const int batch_size, const int seq_len,
const int head_num, const int size_per_head,
const int word_per_block)
{
// QKV: [batch x sequence length, hidden * 3]
const T* data_ptr;
T* buf_ptr;
int n = head_num * size_per_head;
const int blocks_per_word = n / blockDim.x;
const int blocks_per_buffer = gridDim.x / 3;
const int qkv_id = blockIdx.x / blocks_per_buffer;
const int block_id_in_buffer = blockIdx.x % blocks_per_buffer;
const int offset = block_id_in_buffer * blockDim.x + threadIdx.x;
const int bias_id = offset % n;
T* buf_ptrs[3] = {q_buf_, k_buf_, v_buf_};
const int bid = blockIdx.x;
for(int index = threadIdx.x; index < 3 * n; index += blockDim.x) // bug fix: cover all of Q, K and V (3 * n elements), not just the first n
{
buf_ptrs[index / n][bid * n + index % n] = QKV[bid * 3 * n + index] + __ldg(&bias[index]);
}
}
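// Launch sketch for the kernel above (a guess at intended use -- it is not
// called elsewhere in this file, and several of its locals look vestigial):
// one block per token, with n = head_num * size_per_head and the qkv bias
// laid out as [3 * n]:
//   add_QKV_bias_generalized_2<<<batch_size * seq_len, 256, 0, stream>>>(
//       d_qkv, d_qkv_bias, d_q, d_k, d_v, batch_size, seq_len,
//       head_num, size_per_head, /*word_per_block=*/1);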
template <typename T, int size_per_head, int block_sz>
__global__
void cross_attention_kernel_opt(
T* __restrict query_buf, const T* __restrict Q_bias,
T* __restrict key_cache, const T* __restrict K_bias,
T* __restrict value_cache, const T* __restrict V_bias,
const int* length_per_sample, T* __restrict context_buf,
const bool* finished,
int batch_size, int head_num, const int step, const int seq_len, const float scalar)
{
if(finished != nullptr && finished[blockIdx.x / head_num] == true) return;
typedef Copy_t<T, size_per_head> copy_t;
const int elems_per_thread = size_per_head / WARP_SIZE;
union Access_t
{
copy_t v;
T x[elems_per_thread]; // supported size 1,2,4
};
typedef struct Float_n_t
{
float x[elems_per_thread]; // supported size 1,2,4
} float_n_t;
__shared__ float_n_t sq[block_sz];
extern __shared__ float logits[]; // used to store the logits for steps [0, step)
const int warp_id = threadIdx.x / WARP_SIZE;
const int warp_num = block_sz / WARP_SIZE;
typedef cub::BlockReduce<float, block_sz> MaxValBlockReduce;
typedef cub::BlockReduce<float, block_sz> BlockReduce;
__shared__ typename MaxValBlockReduce::TempStorage max_val_block_temp_storage;
__shared__ typename BlockReduce::TempStorage block_temp_storage;
__shared__ typename cub::WarpReduce<float>::TempStorage temp_storage[warp_num];
const int tid = threadIdx.x;
const int bid = blockIdx.x / head_num;
const int head_id = blockIdx.x % head_num;
int length = __ldg(&length_per_sample[bid]);
const int lane_id = tid % WARP_SIZE;
int qkv_id = bid * head_num * size_per_head + head_id * size_per_head;
int qkv_bias_id = head_id * size_per_head;
int key_value_id = bid * (seq_len * head_num * size_per_head) +
+ head_id * size_per_head;
query_buf = &query_buf[qkv_id];
K_bias = &K_bias[qkv_bias_id];
key_cache = &key_cache[key_value_id];
Q_bias = &Q_bias[qkv_bias_id];
V_bias = &V_bias[qkv_bias_id];
value_cache = &value_cache[key_value_id];
context_buf = &context_buf[qkv_id];
Access_t bias_r, key_val_r, query_buf_r;
// each warp will have its own copy of sq
query_buf_r.v = *((copy_t *)query_buf + lane_id);
bias_r.v = *((copy_t *)Q_bias + lane_id);
float qb_r[elems_per_thread];
for (int i = 0; i < elems_per_thread; ++i)
{
qb_r[i] = (float)query_buf_r.x[i] + (float)bias_r.x[i];
}
//offset for each step
int offset = head_num * size_per_head;
bias_r.v = *((copy_t *) K_bias + lane_id);
for(int ite = warp_id; ite < length; ite += warp_num)
{
key_val_r.v = *((copy_t *)&key_cache[ite * offset] + lane_id);
//For the first step, we should add bias to key memory cache.
//The KV memory cache only needs to be updated at the first step.
if (step == 1)
{
for (int i = 0; i < elems_per_thread; i++)
{
key_val_r.x[i] = (float)key_val_r.x[i] + (float)bias_r.x[i];
}
*((copy_t *)&key_cache[ite * offset] + lane_id) = key_val_r.v;
}
float val = 0.f;
for (int i = 0; i < elems_per_thread; i++)
{
val = val + (float)key_val_r.x[i] * qb_r[i] * scalar;
}
float qk = cub::WarpReduce<float>(temp_storage[warp_id]).Sum(val);
if (lane_id == 0)
{
logits[ite] = qk;
}
}
__syncthreads();
__shared__ float s_max_val, s_sum;
float local_i = -1e20f;
for(int i = tid; i < length; i += blockDim.x)
local_i = max(local_i, logits[i]);
float max_val = MaxValBlockReduce(max_val_block_temp_storage).Reduce(local_i, cub::Max());
if(tid == 0)
s_max_val = max_val;
__syncthreads();
float local_o = 0.0f;
for(int i = tid; i < length; i += blockDim.x)
{
logits[i] = __expf(logits[i] - s_max_val);
local_o += logits[i];
}
float val = BlockReduce(block_temp_storage).Sum(local_o);
if(tid == 0)
s_sum = val + 1e-6;
__syncthreads();
float s_sum_inverse = __fdividef(1.0f, s_sum);
for(int i = tid; i < length; i += blockDim.x)
{
logits[i] = logits[i] * s_sum_inverse;
}
__syncthreads();
// This optimization introduces a discrepancy because FP32 summation happens in a different order
float sum_r[elems_per_thread] = {0.f};
bias_r.v = *((copy_t *) V_bias + lane_id);
for(int ite = warp_id; ite < length; ite += warp_num)
{
key_val_r.v = *((copy_t *)&value_cache[ite * offset] + lane_id);
//For the first step, we should add bias to the value memory cache.
if(step == 1)
{
for (int i = 0; i < elems_per_thread; i++)
{
key_val_r.x[i] = (float)key_val_r.x[i] + (float)bias_r.x[i];
}
*((copy_t *)&value_cache[ite * offset] + lane_id) = key_val_r.v;
}
for (int i = 0; i < elems_per_thread; ++i)
{
sum_r[i] += (float)key_val_r.x[i] * logits[ite];
}
}
for (int i = 0; i < elems_per_thread; i++)
{
sq[warp_id * WARP_SIZE + lane_id].x[i] = sum_r[i];
}
__syncthreads();
if (threadIdx.x < WARP_SIZE)
{
#pragma unroll
for (int j = 1; j < warp_num; j++)
{
for (int i = 0; i < elems_per_thread; ++i)
{
sum_r[i] = sum_r[i] + (float)sq[j * WARP_SIZE + threadIdx.x].x[i];
}
}
}
__syncthreads();
#pragma unroll
for (int i = 0; i < elems_per_thread; i++)
{
key_val_r.x[i] = sum_r[i];
}
if (threadIdx.x < WARP_SIZE)
{
*((copy_t *)context_buf + lane_id) = key_val_r.v;
}
}
template<typename T>
__global__
void cross_attention_kernel(
T* query_buf, const T* Q_bias,
T* key_cache, const T* K_bias,
T* value_cache, const T* V_bias,
const int* length_per_sample, T* context_buf,
const bool* finished,
int batch_size, int head_num, int size_per_head, int step, const int seq_len, const T scalar)
{
if(finished != nullptr && finished[blockIdx.x / head_num] == true) return;
int tid = threadIdx.x;
int bid = blockIdx.x / head_num;
int head_id = blockIdx.x % head_num;
extern __shared__ __align__(sizeof(T)) unsigned s_buf[];
T* sq = reinterpret_cast<T *>(s_buf);
T* logits = reinterpret_cast<T *>(&sq[size_per_head]);
int length = __ldg(&length_per_sample[bid]);
int qkv_id = bid * head_num * size_per_head + head_id * size_per_head + tid;
int qkv_bias_id = head_id * size_per_head + tid;
if(tid < size_per_head)
sq[tid] = query_buf[qkv_id] + Q_bias[qkv_bias_id];
__syncthreads();
for(int ite = 0; ite < length; ++ite)
{
int key_id = bid * (seq_len * head_num * size_per_head) + ite * (head_num * size_per_head)
+ head_id * size_per_head + tid;
T key = tid < size_per_head ? key_cache[key_id] : (T)(0.0f);
//For the first step, we should add bias to key memory cache.
//The KV memory cache only needs to be updated at the first step.
if(step == 1 && tid < size_per_head)
{
key += K_bias[head_id * size_per_head + tid];
key_cache[key_id] = key;
}
T val = (tid < size_per_head) ? key * sq[tid] * scalar : (T)(0.0f);
T qk = blockReduceSum(val);
if(threadIdx.x == 0)
logits[ite] = qk;
__syncthreads(); // TODO: check whether this barrier can be removed
}
__syncthreads();
__shared__ float s_max_val, s_sum;
float local_i = tid < length ? (float)logits[tid] : -1e20f;
float max_val = blockReduceMax<float>(local_i);
if(tid == 0)
s_max_val = max_val;
__syncthreads();
local_i -= s_max_val;
float local_o = tid < length ? __expf(local_i) : 0.0f;
float val = blockReduceSum<float>(local_o);
if(tid == 0)
s_sum = val + 1e-6;
__syncthreads();
if(tid < length)
logits[tid] = local_o / s_sum;
__syncthreads();
if(tid < size_per_head)
{
T sum = (T)0.0f;
for(int ite = 0; ite < length; ++ite)
{
int value_id = bid * seq_len * head_num * size_per_head + ite * head_num * size_per_head
+ head_id * size_per_head + tid;
T value = value_cache[value_id];
//for the first step, add the bias to the value memory cache
if(step == 1)
{
value += V_bias[head_id * size_per_head + tid];
value_cache[value_id] = value;
}
sum += value * logits[ite];
}
context_buf[bid * head_num * size_per_head + head_id * size_per_head + tid] = sum;
}
}
template <typename T>
void cross_attention_dispatch(T* query_buf, const T* Q_bias,
T* key_cache, const T* K_bias, T* value_cache, const T* V_bias, const int* length,
T* context_buf, const bool* finished,
int batch_size, int head_num, int size_per_head, int step, int seq_len, cudaStream_t stream)
{
const int block_sz = ATTENTION_BLOCK_SIZE;
float scalar = 1.f / sqrtf(size_per_head * 1.0f);
dim3 grid(batch_size * head_num);
int cond = size_per_head * ((ATTENION_OPT)? 1:0);
switch (cond)
{
case 32:
cross_attention_kernel_opt<T, 32, block_sz><<<grid, block_sz, sizeof(float)*seq_len, stream>>>(
query_buf, Q_bias, key_cache, K_bias, value_cache, V_bias, length, context_buf, finished,
batch_size, head_num, step, seq_len, scalar);
break;
case 64:
cross_attention_kernel_opt<T, 64, block_sz><<<grid, block_sz, sizeof(float)*seq_len, stream>>>(
query_buf, Q_bias, key_cache, K_bias, value_cache, V_bias, length, context_buf, finished,
batch_size, head_num, step, seq_len, scalar);
break;
case 128:
cross_attention_kernel_opt<T, 128, block_sz><<<grid, block_sz, sizeof(float)*seq_len, stream>>>(
query_buf, Q_bias, key_cache, K_bias, value_cache, V_bias, length, context_buf, finished,
batch_size, head_num, step, seq_len, scalar);
break;
default:
// default path
int block_size = 128;
if(seq_len <= 64)
block_size = 64;
else if(seq_len <= 128 && seq_len > size_per_head)
block_size = 128;
else if(seq_len > 128 && seq_len <= 256)
block_size = 256;
else if(seq_len > 256 && seq_len <= 512)
block_size = 512;
else
block_size = 1024;
if(block_size < size_per_head)
block_size = size_per_head;
assert(block_size <= 1024);
dim3 block(block_size);
int shared_size = sizeof(T) * (size_per_head + seq_len);
cross_attention_kernel<T><<<grid, block, shared_size, stream>>>(
query_buf, Q_bias,
key_cache, K_bias,
value_cache, V_bias,
length, context_buf, finished,
batch_size,
head_num, size_per_head, step, seq_len, scalar);
}
}
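// Shared-memory sizing above: the opt kernels only need the float logits array
// (sizeof(float) * seq_len of dynamic shared memory), while the generic
// fallback also stages the biased query, hence sizeof(T) * (size_per_head + seq_len).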
template void cross_attention_dispatch(
float* query_buf,
const float* Q_bias,
float* key_cache,
const float* K_bias,
float* value_cache,
const float* V_bias,
const int* length,
float* context_buf,
const bool* finished,
int batch_size,
int head_num,
int size_per_head,
int step,
int seq_len,
cudaStream_t stream);
template void cross_attention_dispatch(
half* query_buf,
const half* Q_bias,
half* key_cache,
const half* K_bias,
half* value_cache,
const half* V_bias,
const int* length,
half* context_buf,
const bool* finished,
int batch_size,
int head_num,
int size_per_head,
int step,
int seq_len,
cudaStream_t stream);
template void fusedQKV_masked_attention_kernelLauncher(
const float* qkv_buf,
const float* qkv_bias,
float* k_cache,
float* v_cache,
float* output,
const int batch_size,
const int seq_len,
const int head_num,
const int size_per_head,
const int max_seq_len,
cudaStream_t stream);
template void fusedQKV_masked_attention_kernelLauncher(
const half* qkv_buf,
const half* qkv_bias,
half* k_cache,
half* v_cache,
half* output,
const int batch_size,
const int seq_len,
const int head_num,
const int size_per_head,
const int max_seq_len,
cudaStream_t stream);
}//namespace fastertransformer
|
84c844a94cf5566adf991f0188b0a4c48a233bea.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* =====================================================================================
*
* Filename: lud.cu
*
* Description: The main wrapper for the suite
*
* Version: 1.0
* Created: 10/22/2009 08:40:34 PM
* Revision: none
* Compiler: gcc
*
* Author: Liang Wang (lw2aw), [email protected]
* Company: CS@UVa
*
* =====================================================================================
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <unistd.h>
#include <getopt.h>
#include <stdlib.h>
#include <assert.h>
#include "common.h"
static int do_verify = 0;
static struct option long_options[] = {
/* name, has_arg, flag, val */
{"input", 1, NULL, 'i'},
{"size", 1, NULL, 's'},
{"verify", 0, NULL, 'v'},
{0,0,0,0}
};
extern void
lud_cuda(float *d_m, int matrix_dim);
int
main ( int argc, char *argv[] )
{
int matrix_dim = 32; /* default matrix_dim */
int opt, option_index=0;
func_ret_t ret;
const char *input_file = NULL;
float *m, *d_m, *mm;
stopwatch sw;
while ((opt = getopt_long(argc, argv, "::vs:i:",
long_options, &option_index)) != -1 ) {
switch(opt){
case 'i':
input_file = optarg;
break;
case 'v':
do_verify = 1;
break;
case 's':
matrix_dim = atoi(optarg);
fprintf(stderr, "Currently not supported, use -i instead\n");
fprintf(stderr, "Usage: %s [-v] [-s matrix_size|-i input_file]\n", argv[0]);
exit(EXIT_FAILURE);
case '?':
fprintf(stderr, "invalid option\n");
break;
case ':':
fprintf(stderr, "missing argument\n");
break;
default:
fprintf(stderr, "Usage: %s [-v] [-s matrix_size|-i input_file]\n",
argv[0]);
exit(EXIT_FAILURE);
}
}
if ( (optind < argc) || (optind == 1)) {
fprintf(stderr, "Usage: %s [-v] [-s matrix_size|-i input_file]\n", argv[0]);
exit(EXIT_FAILURE);
}
if (input_file) {
printf("Reading matrix from file %s\n", input_file);
ret = create_matrix_from_file(&m, input_file, &matrix_dim);
if (ret != RET_SUCCESS) {
m = NULL;
fprintf(stderr, "error create matrix from file %s\n", input_file);
exit(EXIT_FAILURE);
}
} else {
printf("No input file specified!\n");
exit(EXIT_FAILURE);
}
if (do_verify){
printf("Before LUD\n");
print_matrix(m, matrix_dim);
matrix_duplicate(m, &mm, matrix_dim);
}
hipMalloc((void**)&d_m,
matrix_dim*matrix_dim*sizeof(float));
/* beginning of timing point */
stopwatch_start(&sw);
hipMemcpy(d_m, m, matrix_dim*matrix_dim*sizeof(float),
hipMemcpyHostToDevice);
lud_cuda(d_m, matrix_dim);
hipMemcpy(m, d_m, matrix_dim*matrix_dim*sizeof(float),
hipMemcpyDeviceToHost);
/* end of timing point */
stopwatch_stop(&sw);
printf("Time consumed(ms): %lf\n", 1000*get_interval_by_sec(&sw));
hipFree(d_m);
if (do_verify){
printf("After LUD\n");
print_matrix(m, matrix_dim);
printf(">>>Verify<<<<\n");
lud_verify(mm, m, matrix_dim);
free(mm);
}
free(m);
return EXIT_SUCCESS;
} /* ---------- end of function main ---------- */
|
84c844a94cf5566adf991f0188b0a4c48a233bea.cu
|
/*
* =====================================================================================
*
* Filename: lud.cu
*
* Description: The main wrapper for the suite
*
* Version: 1.0
* Created: 10/22/2009 08:40:34 PM
* Revision: none
* Compiler: gcc
*
* Author: Liang Wang (lw2aw), [email protected]
* Company: CS@UVa
*
* =====================================================================================
*/
#include <cuda.h>
#include <stdio.h>
#include <unistd.h>
#include <getopt.h>
#include <stdlib.h>
#include <assert.h>
#include "common.h"
static int do_verify = 0;
static struct option long_options[] = {
/* name, has_arg, flag, val */
{"input", 1, NULL, 'i'},
{"size", 1, NULL, 's'},
{"verify", 0, NULL, 'v'},
{0,0,0,0}
};
extern void
lud_cuda(float *d_m, int matrix_dim);
int
main ( int argc, char *argv[] )
{
int matrix_dim = 32; /* default matrix_dim */
int opt, option_index=0;
func_ret_t ret;
const char *input_file = NULL;
float *m, *d_m, *mm;
stopwatch sw;
while ((opt = getopt_long(argc, argv, "::vs:i:",
long_options, &option_index)) != -1 ) {
switch(opt){
case 'i':
input_file = optarg;
break;
case 'v':
do_verify = 1;
break;
case 's':
matrix_dim = atoi(optarg);
fprintf(stderr, "Currently not supported, use -i instead\n");
fprintf(stderr, "Usage: %s [-v] [-s matrix_size|-i input_file]\n", argv[0]);
exit(EXIT_FAILURE);
case '?':
fprintf(stderr, "invalid option\n");
break;
case ':':
fprintf(stderr, "missing argument\n");
break;
default:
fprintf(stderr, "Usage: %s [-v] [-s matrix_size|-i input_file]\n",
argv[0]);
exit(EXIT_FAILURE);
}
}
if ( (optind < argc) || (optind == 1)) {
fprintf(stderr, "Usage: %s [-v] [-s matrix_size|-i input_file]\n", argv[0]);
exit(EXIT_FAILURE);
}
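/* Example invocation (illustrative; the data file path is hypothetical):
       ./lud -v -i ../data/512.dat
   Note that -s is parsed but rejected above, so -i is effectively required. */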
if (input_file) {
printf("Reading matrix from file %s\n", input_file);
ret = create_matrix_from_file(&m, input_file, &matrix_dim);
if (ret != RET_SUCCESS) {
m = NULL;
fprintf(stderr, "error create matrix from file %s\n", input_file);
exit(EXIT_FAILURE);
}
} else {
printf("No input file specified!\n");
exit(EXIT_FAILURE);
}
if (do_verify){
printf("Before LUD\n");
print_matrix(m, matrix_dim);
matrix_duplicate(m, &mm, matrix_dim);
}
cudaMalloc((void**)&d_m,
matrix_dim*matrix_dim*sizeof(float));
/* beginning of timing point */
stopwatch_start(&sw);
cudaMemcpy(d_m, m, matrix_dim*matrix_dim*sizeof(float),
cudaMemcpyHostToDevice);
lud_cuda(d_m, matrix_dim);
cudaMemcpy(m, d_m, matrix_dim*matrix_dim*sizeof(float),
cudaMemcpyDeviceToHost);
/* end of timing point */
stopwatch_stop(&sw);
printf("Time consumed(ms): %lf\n", 1000*get_interval_by_sec(&sw));
cudaFree(d_m);
if (do_verify){
printf("After LUD\n");
print_matrix(m, matrix_dim);
printf(">>>Verify<<<<\n");
lud_verify(mm, m, matrix_dim);
free(mm);
}
free(m);
return EXIT_SUCCESS;
} /* ---------- end of function main ---------- */
|
65e8ccbc84211c766b6e1d8f5572bb2e4163a24d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <cv.h>
#include <highgui.h>
#include <time.h>
#include <hip/hip_runtime.h>
#define RED 2
#define GREEN 1
#define BLUE 0
using namespace cv;
__global__ void img2gray(unsigned char *imageInput, int width, int height, unsigned char *imageOutput){
int row = blockIdx.y*blockDim.y+threadIdx.y;
int col = blockIdx.x*blockDim.x+threadIdx.x;
if((row < height) && (col < width)){
imageOutput[row*width+col] = imageInput[(row*width+col)*3+RED]*0.299 + imageInput[(row*width+col)*3+GREEN]*0.587 \
+ imageInput[(row*width+col)*3+BLUE]*0.114;
}
}
int main(int argc, char **argv){
hipError_t error = hipSuccess;
clock_t start, end, startGPU, endGPU;
double cpu_time_used, gpu_time_used;
char* imageName = argv[1];
unsigned char *dataRawImage, *d_dataRawImage, *d_imageOutput, *h_imageOutput;
Mat image;
image = imread(imageName, 1);
if(argc !=2 || !image.data){
printf("No image Data \n");
return -1;
}
Size s = image.size();
int width = s.width;
int height = s.height;
int size = sizeof(unsigned char)*width*height*image.channels();
int sizeGray = sizeof(unsigned char)*width*height;
dataRawImage = (unsigned char*)malloc(size);
error = hipMalloc((void**)&d_dataRawImage,size);
if(error != hipSuccess){
printf("Error reservando memoria para d_dataRawImage\n");
exit(-1);
}
h_imageOutput = (unsigned char *)malloc(sizeGray);
error = hipMalloc((void**)&d_imageOutput,sizeGray);
if(error != hipSuccess){
printf("Error reservando memoria para d_imageOutput\n");
exit(-1);
}
dataRawImage = image.data;
/*for (int i = 0; i < height; i++) {
for (int j = 0; j < width; j++) {
dataRawImage[(i*width+j)*3+BLUE] = 0;
}
}*/
startGPU = clock();
error = hipMemcpy(d_dataRawImage,dataRawImage,size, hipMemcpyHostToDevice);
if(error != hipSuccess){
printf("Error copiando los datos de dataRawImage a d_dataRawImage \n");
exit(-1);
}
int blockSize = 32;
dim3 dimBlock(blockSize,blockSize,1);
dim3 dimGrid(ceil(width/float(blockSize)),ceil(height/float(blockSize)),1);
hipLaunchKernelGGL(( img2gray), dim3(dimGrid),dim3(dimBlock), 0, 0, d_dataRawImage,width,height,d_imageOutput);
hipDeviceSynchronize();
hipMemcpy(h_imageOutput,d_imageOutput,sizeGray,hipMemcpyDeviceToHost);
endGPU = clock();
Mat gray_image;
gray_image.create(height,width,CV_8UC1);
gray_image.data = h_imageOutput;
start = clock();
Mat gray_image_opencv;
cvtColor(image, gray_image_opencv, CV_BGR2GRAY);
end = clock();
imwrite("./Gray_Image.jpg",gray_image);
namedWindow(imageName, WINDOW_NORMAL);
namedWindow("Gray Image CUDA", WINDOW_NORMAL);
namedWindow("Gray Image OpenCV", WINDOW_NORMAL);
imshow(imageName,image);
imshow("Gray Image CUDA", gray_image);
imshow("Gray Image OpenCV",gray_image_opencv);
waitKey(0);
//free(dataRawImage); // intentionally not freed: it aliases image.data after the assignment above (the original malloc'd buffer leaks)
gpu_time_used = ((double) (endGPU - startGPU)) / CLOCKS_PER_SEC;
printf("Tiempo Algoritmo Paralelo: %.10f\n",gpu_time_used);
cpu_time_used = ((double) (end - start)) /CLOCKS_PER_SEC;
printf("Tiempo Algoritmo OpenCV: %.10f\n",cpu_time_used);
printf("La aceleracin obtenida es de %.10fX\n",cpu_time_used/gpu_time_used);
hipFree(d_dataRawImage);
hipFree(d_imageOutput);
return 0;
}
|
65e8ccbc84211c766b6e1d8f5572bb2e4163a24d.cu
|
#include <cv.h>
#include <highgui.h>
#include <time.h>
#include <cuda.h>
#define RED 2
#define GREEN 1
#define BLUE 0
using namespace cv;
__global__ void img2gray(unsigned char *imageInput, int width, int height, unsigned char *imageOutput){
int row = blockIdx.y*blockDim.y+threadIdx.y;
int col = blockIdx.x*blockDim.x+threadIdx.x;
if((row < height) && (col < width)){
imageOutput[row*width+col] = imageInput[(row*width+col)*3+RED]*0.299 + imageInput[(row*width+col)*3+GREEN]*0.587 \
+ imageInput[(row*width+col)*3+BLUE]*0.114;
}
}
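// The 0.299/0.587/0.114 weights above are the ITU-R BT.601 luma coefficients,
// the same ones cvtColor(CV_BGR2GRAY) uses below, so the CUDA and OpenCV
// outputs should agree up to rounding. For example, a 1920x1080 input with the
// blockSize of 32 chosen in main launches dimGrid(60, 34) blocks of 32x32 threads.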
int main(int argc, char **argv){
cudaError_t error = cudaSuccess;
clock_t start, end, startGPU, endGPU;
double cpu_time_used, gpu_time_used;
char* imageName = argv[1];
unsigned char *dataRawImage, *d_dataRawImage, *d_imageOutput, *h_imageOutput;
Mat image;
image = imread(imageName, 1);
if(argc !=2 || !image.data){
printf("No image Data \n");
return -1;
}
Size s = image.size();
int width = s.width;
int height = s.height;
int size = sizeof(unsigned char)*width*height*image.channels();
int sizeGray = sizeof(unsigned char)*width*height;
dataRawImage = (unsigned char*)malloc(size);
error = cudaMalloc((void**)&d_dataRawImage,size);
if(error != cudaSuccess){
printf("Error reservando memoria para d_dataRawImage\n");
exit(-1);
}
h_imageOutput = (unsigned char *)malloc(sizeGray);
error = cudaMalloc((void**)&d_imageOutput,sizeGray);
if(error != cudaSuccess){
printf("Error reservando memoria para d_imageOutput\n");
exit(-1);
}
dataRawImage = image.data;
/*for (int i = 0; i < height; i++) {
for (int j = 0; j < width; j++) {
dataRawImage[(i*width+j)*3+BLUE] = 0;
}
}*/
startGPU = clock();
error = cudaMemcpy(d_dataRawImage,dataRawImage,size, cudaMemcpyHostToDevice);
if(error != cudaSuccess){
printf("Error copiando los datos de dataRawImage a d_dataRawImage \n");
exit(-1);
}
int blockSize = 32;
dim3 dimBlock(blockSize,blockSize,1);
dim3 dimGrid(ceil(width/float(blockSize)),ceil(height/float(blockSize)),1);
img2gray<<<dimGrid,dimBlock>>>(d_dataRawImage,width,height,d_imageOutput);
cudaDeviceSynchronize();
cudaMemcpy(h_imageOutput,d_imageOutput,sizeGray,cudaMemcpyDeviceToHost);
endGPU = clock();
Mat gray_image;
gray_image.create(height,width,CV_8UC1);
gray_image.data = h_imageOutput;
start = clock();
Mat gray_image_opencv;
cvtColor(image, gray_image_opencv, CV_BGR2GRAY);
end = clock();
imwrite("./Gray_Image.jpg",gray_image);
namedWindow(imageName, WINDOW_NORMAL);
namedWindow("Gray Image CUDA", WINDOW_NORMAL);
namedWindow("Gray Image OpenCV", WINDOW_NORMAL);
imshow(imageName,image);
imshow("Gray Image CUDA", gray_image);
imshow("Gray Image OpenCV",gray_image_opencv);
waitKey(0);
//free(dataRawImage); // intentionally not freed: it aliases image.data after the assignment above (the original malloc'd buffer leaks)
gpu_time_used = ((double) (endGPU - startGPU)) / CLOCKS_PER_SEC;
printf("Tiempo Algoritmo Paralelo: %.10f\n",gpu_time_used);
cpu_time_used = ((double) (end - start)) /CLOCKS_PER_SEC;
printf("Tiempo Algoritmo OpenCV: %.10f\n",cpu_time_used);
printf("La aceleración obtenida es de %.10fX\n",cpu_time_used/gpu_time_used);
cudaFree(d_dataRawImage);
cudaFree(d_imageOutput);
return 0;
}
|
4c45f13f17a68aff640cd7974576beecb985f156.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <ATen/native/hip/fused_adam_impl.cuh>
#include <ATen/Dispatch.h>
#include <ATen/native/ForeachUtils.h>
#include <ATen/native/hip/fused_adam_utils.cuh>
#include <ATen/native/hip/MultiTensorApply.cuh>
#include <vector>
namespace at::native {
void _fused_adam_cuda_impl_(
at::TensorList params,
at::TensorList grads,
at::TensorList exp_avgs,
at::TensorList exp_avg_sqs,
at::TensorList state_steps,
const double lr,
const double beta1,
const double beta2,
const double weight_decay,
const double eps,
const bool amsgrad,
const bool maximize,
const c10::optional<at::Tensor>& grad_scale,
const c10::optional<at::Tensor>& found_inf
) {
std::vector<std::vector<at::Tensor>> tensor_lists{
params.vec(), grads.vec(), exp_avgs.vec(), exp_avg_sqs.vec() };
float* grad_scale_ptr = grad_scale.has_value() ? grad_scale->data_ptr<float>() : nullptr;
float* found_inf_ptr = found_inf.has_value() ? found_inf->data_ptr<float>() : nullptr;
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, params[0].scalar_type(),
"fused_adam_kernel_cuda", [&]() {
multi_tensor_apply_for_fused_optimizer<4>(
tensor_lists,
state_steps,
FusedAdamMathFunctor<scalar_t, 4>(),
lr,
beta1,
beta2,
weight_decay,
eps,
maximize,
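// note: the `amsgrad` parameter above is deliberately not forwarded; judging
// by the hardcoded `false` below, this impl covers only the non-amsgrad path
// (the amsgrad variant presumably lives in a separate impl).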
/* amsgrad */false,
grad_scale_ptr,
found_inf_ptr);
});
}
} // namespace at::native
|
4c45f13f17a68aff640cd7974576beecb985f156.cu
|
#include <ATen/native/cuda/fused_adam_impl.cuh>
#include <ATen/Dispatch.h>
#include <ATen/native/ForeachUtils.h>
#include <ATen/native/cuda/fused_adam_utils.cuh>
#include <ATen/native/cuda/MultiTensorApply.cuh>
#include <vector>
namespace at::native {
void _fused_adam_cuda_impl_(
at::TensorList params,
at::TensorList grads,
at::TensorList exp_avgs,
at::TensorList exp_avg_sqs,
at::TensorList state_steps,
const double lr,
const double beta1,
const double beta2,
const double weight_decay,
const double eps,
const bool amsgrad,
const bool maximize,
const c10::optional<at::Tensor>& grad_scale,
const c10::optional<at::Tensor>& found_inf
) {
std::vector<std::vector<at::Tensor>> tensor_lists{
params.vec(), grads.vec(), exp_avgs.vec(), exp_avg_sqs.vec() };
float* grad_scale_ptr = grad_scale.has_value() ? grad_scale->data_ptr<float>() : nullptr;
float* found_inf_ptr = found_inf.has_value() ? found_inf->data_ptr<float>() : nullptr;
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, params[0].scalar_type(),
"fused_adam_kernel_cuda", [&]() {
multi_tensor_apply_for_fused_optimizer<4>(
tensor_lists,
state_steps,
FusedAdamMathFunctor<scalar_t, 4>(),
lr,
beta1,
beta2,
weight_decay,
eps,
maximize,
/* amsgrad */false,
grad_scale_ptr,
found_inf_ptr);
});
}
} // namespace at::native
|
bb7bcf054bcb0276e3f2ff4626747c67ea405440.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
#include <assert.h>
extern "C" {
#include "blas.h"
#include "hip/hip_runtime.h"
#include "utils.h"
}
__global__ void scale_bias_kernel(float *output, float *biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if(offset < size) output[(batch*n+filter)*size + offset] *= biases[filter];
}
void scale_bias_gpu(float *output, float *biases, int batch, int n, int size)
{
dim3 dimGrid((size-1)/BLOCK + 1, n, batch);
dim3 dimBlock(BLOCK, 1, 1);
hipLaunchKernelGGL(( scale_bias_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, output, biases, n, size);
check_error(hipPeekAtLastError());
}
__global__ void backward_scale_kernel(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates)
{
__shared__ float part[BLOCK];
int i,b;
int filter = blockIdx.x;
int p = threadIdx.x;
float sum = 0;
for(b = 0; b < batch; ++b){
for(i = 0; i < size; i += BLOCK){
int index = p + i + size*(filter + n*b);
sum += (p+i < size) ? delta[index]*x_norm[index] : 0;
}
}
part[p] = sum;
__syncthreads();
if (p == 0) {
for(i = 0; i < BLOCK; ++i) scale_updates[filter] += part[i];
}
}
void backward_scale_gpu(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates)
{
hipLaunchKernelGGL(( backward_scale_kernel), dim3(n), dim3(BLOCK), 0, 0, x_norm, delta, batch, n, size, scale_updates);
check_error(hipPeekAtLastError());
}
__global__ void add_bias_kernel(float *output, float *biases, int batch, int n, int size)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= n*size*batch) return;
int i = index % size;
index /= size;
int j = index % n;
index /= n;
int k = index;
output[(k*n+j)*size + i] += biases[j];
}
void add_bias_gpu(float *output, float *biases, int batch, int n, int size)
{
int num = n*size*batch;
hipLaunchKernelGGL(( add_bias_kernel), dim3(cuda_gridsize(num)), dim3(BLOCK), 0, 0, output, biases, batch, n, size);
check_error(hipPeekAtLastError());
}
__global__ void backward_bias_conn_kernel(float *bias_updates, float *delta, int batch, int n)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= n) return;
int b;
float sum = 0;
for(b = 0; b < batch; ++b){
int i = b*n + index;
sum += delta[i];
}
bias_updates[index] += sum;
}
__global__ void backward_bias_kernel(float *bias_updates, float *delta, int batch, int n, int size)
{
__shared__ float part[BLOCK];
int i,b;
int filter = blockIdx.x;
int p = threadIdx.x;
float sum = 0;
for(b = 0; b < batch; ++b){
for(i = 0; i < size; i += BLOCK){
int index = p + i + size*(filter + n*b);
sum += (p+i < size) ? delta[index] : 0;
}
}
part[p] = sum;
__syncthreads();
if (p == 0) {
for(i = 0; i < BLOCK; ++i) bias_updates[filter] += part[i];
}
}
void backward_bias_gpu(float *bias_updates, float *delta, int batch, int n, int size)
{
if(size == 1){
hipLaunchKernelGGL(( backward_bias_conn_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, bias_updates, delta, batch, n);
}else{
hipLaunchKernelGGL(( backward_bias_kernel), dim3(n), dim3(BLOCK), 0, 0, bias_updates, delta, batch, n, size);
}
check_error(hipPeekAtLastError());
}
/*
__global__ void dot_kernel(float *output, float scale, int batch, int n, int size, float *delta)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
int f1 = index / n;
int f2 = index % n;
if (f2 <= f1) return;
float sum = 0;
float norm1 = 0;
float norm2 = 0;
int b, i;
for(b = 0; b < batch; ++b){
for(i = 0; i < size; ++i){
int i1 = b * size * n + f1 * size + i;
int i2 = b * size * n + f2 * size + i;
sum += output[i1] * output[i2];
norm1 += output[i1] * output[i1];
norm2 += output[i2] * output[i2];
}
}
norm1 = sqrt(norm1);
norm2 = sqrt(norm2);
float norm = norm1 * norm2;
sum = sum / norm;
for(b = 0; b < batch; ++b){
for(i = 0; i < size; ++i){
int i1 = b * size * n + f1 * size + i;
int i2 = b * size * n + f2 * size + i;
delta[i1] += - scale * sum * output[i2] / norm;
delta[i2] += - scale * sum * output[i1] / norm;
}
}
}
void dot_error_gpu(layer l)
{
dot_kernel<<<cuda_gridsize(l.n*l.n), BLOCK>>>(l.output_gpu, l.dot, l.batch, l.n, l.out_w * l.out_h, l.delta_gpu);
check_error(hipPeekAtLastError());
}
*/
__global__ void adam_kernel(int N, float *x, float *m, float *v, float B1, float B2, float rate, float eps, int t)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= N) return;
float mhat = m[index] / (1.f - powf(B1, t));
float vhat = v[index] / (1.f - powf(B2, t));
x[index] = x[index] + rate * mhat / (sqrtf(vhat) + eps);
}
extern "C" void adam_gpu(int n, float *x, float *m, float *v, float B1, float B2, float rate, float eps, int t)
{
hipLaunchKernelGGL(( adam_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, x, m, v, B1, B2, rate, eps, t);
check_error(hipPeekAtLastError());
}
extern "C" void adam_update_gpu(float *w, float *d, float *m, float *v, float B1, float B2, float eps, float decay, float rate, int n, int batch, int t)
{
scal_gpu(n, B1, m, 1);
scal_gpu(n, B2, v, 1);
axpy_gpu(n, -decay*batch, w, 1, d, 1);
axpy_gpu(n, (1-B1), d, 1, m, 1);
mul_gpu(n, d, 1, d, 1);
axpy_gpu(n, (1-B2), d, 1, v, 1);
adam_gpu(n, w, m, v, B1, B2, rate, eps, t);
fill_gpu(n, 0, d, 1);
}
__global__ void normalize_kernel(int N, float *x, float *mean, float *variance, int batch, int filters, int spatial)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= N) return;
int f = (index/spatial)%filters;
x[index] = (x[index] - mean[f])/(sqrtf(variance[f] + .00001f));
}
__global__ void normalize_delta_kernel(int N, float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= N) return;
int f = (index/spatial)%filters;
delta[index] = delta[index] * 1.f/(sqrtf(variance[f] + .00001f)) + variance_delta[f] * 2.f * (x[index] - mean[f]) / (spatial * batch) + mean_delta[f]/(spatial*batch);
}
extern "C" void normalize_delta_gpu(float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta)
{
size_t N = batch*filters*spatial;
hipLaunchKernelGGL(( normalize_delta_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, x, mean, variance, mean_delta, variance_delta, batch, filters, spatial, delta);
check_error(hipPeekAtLastError());
}
__global__ void variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= filters) return;
int j,k;
variance_delta[i] = 0;
for(j = 0; j < batch; ++j){
for(k = 0; k < spatial; ++k){
int index = j*filters*spatial + i*spatial + k;
variance_delta[i] += delta[index]*(x[index] - mean[i]);
}
}
variance_delta[i] *= -.5f * powf(variance[i] + .00001f, (float)(-3.f/2.f));
}
__global__ void accumulate_kernel(float *x, int n, int groups, float *sum)
{
int k;
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= groups) return;
sum[i] = 0;
for(k = 0; k < n; ++k){
sum[i] += x[k*groups + i];
}
}
__global__ void fast_mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
{
const int threads = BLOCK;
__shared__ float local[threads];
int id = threadIdx.x;
local[id] = 0;
int filter = blockIdx.x;
int i, j;
for(j = 0; j < batch; ++j){
for(i = 0; i < spatial; i += threads){
int index = j*spatial*filters + filter*spatial + i + id;
local[id] += (i+id < spatial) ? delta[index] : 0;
}
}
__syncthreads();
if(id == 0){
mean_delta[filter] = 0;
for(i = 0; i < threads; ++i){
mean_delta[filter] += local[i];
}
mean_delta[filter] *= (-1.f/sqrtf(variance[filter] + .00001f));
}
}
__global__ void fast_variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta)
{
const int threads = BLOCK;
__shared__ float local[threads];
int id = threadIdx.x;
local[id] = 0;
int filter = blockIdx.x;
int i, j;
for(j = 0; j < batch; ++j){
for(i = 0; i < spatial; i += threads){
int index = j*spatial*filters + filter*spatial + i + id;
local[id] += (i+id < spatial) ? delta[index]*(x[index] - mean[filter]) : 0;
}
}
__syncthreads();
if(id == 0){
variance_delta[filter] = 0;
for(i = 0; i < threads; ++i){
variance_delta[filter] += local[i];
}
variance_delta[filter] *= -.5f * powf(variance[filter] + .00001f, (float)(-3.f/2.f));
}
}
__global__ void mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= filters) return;
int j,k;
mean_delta[i] = 0;
for (j = 0; j < batch; ++j) {
for (k = 0; k < spatial; ++k) {
int index = j*filters*spatial + i*spatial + k;
mean_delta[i] += delta[index];
}
}
mean_delta[i] *= (-1.f/sqrtf(variance[i] + .00001f));
}
extern "C" void mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
{
hipLaunchKernelGGL(( mean_delta_kernel), dim3(cuda_gridsize(filters)), dim3(BLOCK), 0, 0, delta, variance, batch, filters, spatial, mean_delta);
check_error(hipPeekAtLastError());
}
extern "C" void fast_mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
{
hipLaunchKernelGGL(( fast_mean_delta_kernel), dim3(filters), dim3(BLOCK), 0, 0, delta, variance, batch, filters, spatial, mean_delta);
check_error(hipPeekAtLastError());
}
extern "C" void fast_variance_delta_gpu(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta)
{
hipLaunchKernelGGL(( fast_variance_delta_kernel), dim3(filters), dim3(BLOCK), 0, 0, x, delta, mean, variance, batch, filters, spatial, variance_delta);
check_error(hipPeekAtLastError());
}
__global__ void mean_kernel(float *x, int batch, int filters, int spatial, float *mean)
{
float scale = 1.f/(batch * spatial);
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= filters) return;
int j,k;
mean[i] = 0;
for(j = 0; j < batch; ++j){
for(k = 0; k < spatial; ++k){
int index = j*filters*spatial + i*spatial + k;
mean[i] += x[index];
}
}
mean[i] *= scale;
}
__global__ void variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
float scale = 1.f/(batch * spatial - 1);
int j,k;
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= filters) return;
variance[i] = 0;
for(j = 0; j < batch; ++j){
for(k = 0; k < spatial; ++k){
int index = j*filters*spatial + i*spatial + k;
variance[i] += powf((x[index] - mean[i]), 2);
}
}
variance[i] *= scale;
}
__global__ void reorg_kernel(int N, float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i >= N) return;
int in_index = i;
int in_w = i%w;
i = i/w;
int in_h = i%h;
i = i/h;
int in_c = i%c;
i = i/c;
int b = i%batch;
int out_c = c/(stride*stride);
int c2 = in_c % out_c;
int offset = in_c / out_c;
int w2 = in_w*stride + offset % stride;
int h2 = in_h*stride + offset / stride;
//printf("%d\n", offset);
int out_index = w2 + w*stride*(h2 + h*stride*(c2 + out_c*b));
// printf("%d %d %d\n", w2, h2, c2);
//printf("%d %d\n", in_index, out_index);
//if(out_index >= N || out_index < 0) printf("bad bad bad \n");
if(forward) out[out_index] = x[in_index];
else out[in_index] = x[out_index];
//if(forward) out[1] = x[1];
//else out[0] = x[0];
}
__global__ void axpy_kernel(int N, float ALPHA, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) Y[OFFY+i*INCY] += ALPHA*X[OFFX+i*INCX];
}
__global__ void pow_kernel(int N, float ALPHA, float *X, int INCX, float *Y, int INCY)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) Y[i*INCY] = pow(X[i*INCX], ALPHA);
}
__global__ void const_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) X[i*INCX] = ALPHA;
}
__global__ void constrain_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) X[i*INCX] = fminf(ALPHA, fmaxf(-ALPHA, X[i*INCX]));
}
__global__ void supp_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) {
if((X[i*INCX] * X[i*INCX]) < (ALPHA * ALPHA)) X[i*INCX] = 0;
}
}
__global__ void add_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) X[i*INCX] += ALPHA;
}
__global__ void scal_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) X[i*INCX] *= ALPHA;
}
__global__ void fill_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) X[i*INCX] = ALPHA;
}
__global__ void copy_kernel(int N, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) Y[i*INCY + OFFY] = X[i*INCX + OFFX];
}
__global__ void mul_kernel(int N, float *X, int INCX, float *Y, int INCY)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) Y[i*INCY] *= X[i*INCX];
}
extern "C" void normalize_gpu(float *x, float *mean, float *variance, int batch, int filters, int spatial)
{
size_t N = batch*filters*spatial;
hipLaunchKernelGGL(( normalize_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, x, mean, variance, batch, filters, spatial);
check_error(hipPeekAtLastError());
}
__global__ void fast_mean_kernel(float *x, int batch, int filters, int spatial, float *mean)
{
const int threads = BLOCK;
__shared__ float local[threads];
int id = threadIdx.x;
local[id] = 0;
int filter = blockIdx.x;
int i, j;
for(j = 0; j < batch; ++j){
for(i = 0; i < spatial; i += threads){
int index = j*spatial*filters + filter*spatial + i + id;
local[id] += (i+id < spatial) ? x[index] : 0;
}
}
__syncthreads();
if(id == 0){
mean[filter] = 0;
for(i = 0; i < threads; ++i){
mean[filter] += local[i];
}
mean[filter] /= spatial * batch;
}
}
__global__ void fast_variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
const int threads = BLOCK;
__shared__ float local[threads];
int id = threadIdx.x;
local[id] = 0;
int filter = blockIdx.x;
int i, j;
for(j = 0; j < batch; ++j){
for(i = 0; i < spatial; i += threads){
int index = j*spatial*filters + filter*spatial + i + id;
local[id] += (i+id < spatial) ? powf((x[index] - mean[filter]), 2) : 0;
}
}
__syncthreads();
if(id == 0){
variance[filter] = 0;
for(i = 0; i < threads; ++i){
variance[filter] += local[i];
}
variance[filter] /= (spatial * batch - 1);
}
}
extern "C" void fast_mean_gpu(float *x, int batch, int filters, int spatial, float *mean)
{
hipLaunchKernelGGL(( fast_mean_kernel), dim3(filters), dim3(BLOCK), 0, 0, x, batch, filters, spatial, mean);
check_error(hipPeekAtLastError());
}
extern "C" void fast_variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
hipLaunchKernelGGL(( fast_variance_kernel), dim3(filters), dim3(BLOCK), 0, 0, x, mean, batch, filters, spatial, variance);
check_error(hipPeekAtLastError());
}
extern "C" void mean_gpu(float *x, int batch, int filters, int spatial, float *mean)
{
hipLaunchKernelGGL(( mean_kernel), dim3(cuda_gridsize(filters)), dim3(BLOCK), 0, 0, x, batch, filters, spatial, mean);
check_error(hipPeekAtLastError());
}
extern "C" void variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
hipLaunchKernelGGL(( variance_kernel), dim3(cuda_gridsize(filters)), dim3(BLOCK), 0, 0, x, mean, batch, filters, spatial, variance);
check_error(hipPeekAtLastError());
}
extern "C" void axpy_gpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY)
{
axpy_gpu_offset(N, ALPHA, X, 0, INCX, Y, 0, INCY);
}
extern "C" void pow_gpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY)
{
hipLaunchKernelGGL(( pow_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX, Y, INCY);
check_error(hipPeekAtLastError());
}
extern "C" void axpy_gpu_offset(int N, float ALPHA, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY)
{
hipLaunchKernelGGL(( axpy_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, OFFX, INCX, Y, OFFY, INCY);
check_error(hipPeekAtLastError());
}
extern "C" void copy_gpu(int N, float * X, int INCX, float * Y, int INCY)
{
copy_gpu_offset(N, X, 0, INCX, Y, 0, INCY);
}
extern "C" void mul_gpu(int N, float * X, int INCX, float * Y, int INCY)
{
hipLaunchKernelGGL(( mul_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, X, INCX, Y, INCY);
check_error(hipPeekAtLastError());
}
extern "C" void copy_gpu_offset(int N, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY)
{
hipLaunchKernelGGL(( copy_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, X, OFFX, INCX, Y, OFFY, INCY);
check_error(hipPeekAtLastError());
}
__global__ void flatten_kernel(int N, float *x, int spatial, int layers, int batch, int forward, float *out)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i >= N) return;
int in_s = i%spatial;
i = i/spatial;
int in_c = i%layers;
i = i/layers;
int b = i;
int i1 = b*layers*spatial + in_c*spatial + in_s;
int i2 = b*layers*spatial + in_s*layers + in_c;
if (forward) out[i2] = x[i1];
else out[i1] = x[i2];
}
extern "C" void flatten_gpu(float *x, int spatial, int layers, int batch, int forward, float *out)
{
int size = spatial*batch*layers;
hipLaunchKernelGGL(( flatten_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, size, x, spatial, layers, batch, forward, out);
check_error(hipPeekAtLastError());
}
extern "C" void reorg_gpu(float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
{
int size = w*h*c*batch;
hipLaunchKernelGGL(( reorg_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, size, x, w, h, c, batch, stride, forward, out);
check_error(hipPeekAtLastError());
}
__global__ void mask_kernel(int n, float *x, float mask_num, float *mask, float val)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n && mask[i] == mask_num) x[i] = val;
}
extern "C" void mask_gpu(int N, float * X, float mask_num, float * mask, float val)
{
hipLaunchKernelGGL(( mask_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, X, mask_num, mask, val);
check_error(hipPeekAtLastError());
}
__global__ void scale_mask_kernel(int n, float *x, float mask_num, float *mask, float scale)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n && mask[i] == mask_num) x[i] *= scale;
}
extern "C" void scale_mask_gpu(int N, float * X, float mask_num, float * mask, float scale)
{
hipLaunchKernelGGL(( scale_mask_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, X, mask_num, mask, scale);
check_error(hipPeekAtLastError());
}
extern "C" void const_gpu(int N, float ALPHA, float * X, int INCX)
{
hipLaunchKernelGGL(( const_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX);
check_error(hipPeekAtLastError());
}
extern "C" void constrain_gpu(int N, float ALPHA, float * X, int INCX)
{
hipLaunchKernelGGL(( constrain_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX);
check_error(hipPeekAtLastError());
}
extern "C" void add_gpu(int N, float ALPHA, float * X, int INCX)
{
hipLaunchKernelGGL(( add_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX);
check_error(hipPeekAtLastError());
}
extern "C" void scal_gpu(int N, float ALPHA, float * X, int INCX)
{
hipLaunchKernelGGL(( scal_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX);
check_error(hipPeekAtLastError());
}
extern "C" void fill_gpu(int N, float ALPHA, float * X, int INCX)
{
hipLaunchKernelGGL(( fill_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX);
check_error(hipPeekAtLastError());
}
__global__ void softmax_x_ent_kernel(int n, float *pred, float *truth, float *delta, float *error)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n){
float t = truth[i];
float p = pred[i];
error[i] = (t) ? -log(p) : 0;
delta[i] = t-p;
}
}
extern "C" void softmax_x_ent_gpu(int n, float *pred, float *truth, float *delta, float *error)
{
hipLaunchKernelGGL(( softmax_x_ent_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, pred, truth, delta, error);
check_error(hipPeekAtLastError());
}
__device__ void softmax_device(float *input, int n, float temp, int stride, float *output)
{
int i;
float sum = 0;
float largest = -INFINITY;
for(i = 0; i < n; ++i){
float val = input[i*stride]; // float, not int: avoid truncating the running max
largest = (val>largest) ? val : largest;
}
for(i = 0; i < n; ++i){
float e = expf(input[i*stride]/temp - largest/temp);
sum += e;
output[i*stride] = e;
}
for(i = 0; i < n; ++i){
output[i*stride] /= sum;
}
}
__global__ void softmax_kernel(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= batch*groups) return;
int b = id / groups;
int g = id % groups;
softmax_device(input + b*batch_offset + g*group_offset, n, temp, stride, output + b*batch_offset + g*group_offset);
}
extern "C" void softmax_gpu(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output)
{
hipLaunchKernelGGL(( softmax_kernel), dim3(cuda_gridsize(batch*groups)), dim3(BLOCK), 0, 0, input, n, batch, batch_offset, groups, group_offset, stride, temp, output);
check_error(hipPeekAtLastError());
}
|
bb7bcf054bcb0276e3f2ff4626747c67ea405440.cu
|
#include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
#include <assert.h>
extern "C" {
#include "blas.h"
#include "cuda.h"
#include "utils.h"
}
__global__ void scale_bias_kernel(float *output, float *biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if(offset < size) output[(batch*n+filter)*size + offset] *= biases[filter];
}
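/* Grid mapping for the launch in scale_bias_gpu below: blockIdx.x walks the
spatial extent in BLOCK-sized chunks, blockIdx.y selects the filter, and
blockIdx.z selects the batch item, so a single launch scales every output
element by its filter's bias. */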
void scale_bias_gpu(float *output, float *biases, int batch, int n, int size)
{
dim3 dimGrid((size-1)/BLOCK + 1, n, batch);
dim3 dimBlock(BLOCK, 1, 1);
scale_bias_kernel<<<dimGrid, dimBlock>>>(output, biases, n, size);
check_error(cudaPeekAtLastError());
}
__global__ void backward_scale_kernel(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates)
{
__shared__ float part[BLOCK];
int i,b;
int filter = blockIdx.x;
int p = threadIdx.x;
float sum = 0;
for(b = 0; b < batch; ++b){
for(i = 0; i < size; i += BLOCK){
int index = p + i + size*(filter + n*b);
sum += (p+i < size) ? delta[index]*x_norm[index] : 0;
}
}
part[p] = sum;
__syncthreads();
if (p == 0) {
for(i = 0; i < BLOCK; ++i) scale_updates[filter] += part[i];
}
}
void backward_scale_gpu(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates)
{
backward_scale_kernel<<<n, BLOCK>>>(x_norm, delta, batch, n, size, scale_updates);
check_error(cudaPeekAtLastError());
}
__global__ void add_bias_kernel(float *output, float *biases, int batch, int n, int size)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= n*size*batch) return;
int i = index % size;
index /= size;
int j = index % n;
index /= n;
int k = index;
output[(k*n+j)*size + i] += biases[j];
}
void add_bias_gpu(float *output, float *biases, int batch, int n, int size)
{
int num = n*size*batch;
add_bias_kernel<<<cuda_gridsize(num), BLOCK>>>(output, biases, batch, n, size);
check_error(cudaPeekAtLastError());
}
__global__ void backward_bias_conn_kernel(float *bias_updates, float *delta, int batch, int n)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= n) return;
int b;
float sum = 0;
for(b = 0; b < batch; ++b){
int i = b*n + index;
sum += delta[i];
}
bias_updates[index] += sum;
}
__global__ void backward_bias_kernel(float *bias_updates, float *delta, int batch, int n, int size)
{
__shared__ float part[BLOCK];
int i,b;
int filter = blockIdx.x;
int p = threadIdx.x;
float sum = 0;
for(b = 0; b < batch; ++b){
for(i = 0; i < size; i += BLOCK){
int index = p + i + size*(filter + n*b);
sum += (p+i < size) ? delta[index] : 0;
}
}
part[p] = sum;
__syncthreads();
if (p == 0) {
for(i = 0; i < BLOCK; ++i) bias_updates[filter] += part[i];
}
}
void backward_bias_gpu(float *bias_updates, float *delta, int batch, int n, int size)
{
if(size == 1){
backward_bias_conn_kernel<<<cuda_gridsize(n), BLOCK>>>(bias_updates, delta, batch, n);
}else{
backward_bias_kernel<<<n, BLOCK>>>(bias_updates, delta, batch, n, size);
}
check_error(cudaPeekAtLastError());
}
/*
__global__ void dot_kernel(float *output, float scale, int batch, int n, int size, float *delta)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
int f1 = index / n;
int f2 = index % n;
if (f2 <= f1) return;
float sum = 0;
float norm1 = 0;
float norm2 = 0;
int b, i;
for(b = 0; b < batch; ++b){
for(i = 0; i < size; ++i){
int i1 = b * size * n + f1 * size + i;
int i2 = b * size * n + f2 * size + i;
sum += output[i1] * output[i2];
norm1 += output[i1] * output[i1];
norm2 += output[i2] * output[i2];
}
}
norm1 = sqrt(norm1);
norm2 = sqrt(norm2);
float norm = norm1 * norm2;
sum = sum / norm;
for(b = 0; b < batch; ++b){
for(i = 0; i < size; ++i){
int i1 = b * size * n + f1 * size + i;
int i2 = b * size * n + f2 * size + i;
delta[i1] += - scale * sum * output[i2] / norm;
delta[i2] += - scale * sum * output[i1] / norm;
}
}
}
void dot_error_gpu(layer l)
{
dot_kernel<<<cuda_gridsize(l.n*l.n), BLOCK>>>(l.output_gpu, l.dot, l.batch, l.n, l.out_w * l.out_h, l.delta_gpu);
check_error(cudaPeekAtLastError());
}
*/
__global__ void adam_kernel(int N, float *x, float *m, float *v, float B1, float B2, float rate, float eps, int t)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= N) return;
float mhat = m[index] / (1.f - powf(B1, t));
float vhat = v[index] / (1.f - powf(B2, t));
x[index] = x[index] + rate * mhat / (sqrtf(vhat) + eps);
}
extern "C" void adam_gpu(int n, float *x, float *m, float *v, float B1, float B2, float rate, float eps, int t)
{
adam_kernel<<<cuda_gridsize(n), BLOCK>>>(n, x, m, v, B1, B2, rate, eps, t);
check_error(cudaPeekAtLastError());
}
extern "C" void adam_update_gpu(float *w, float *d, float *m, float *v, float B1, float B2, float eps, float decay, float rate, int n, int batch, int t)
{
scal_gpu(n, B1, m, 1);
scal_gpu(n, B2, v, 1);
axpy_gpu(n, -decay*batch, w, 1, d, 1);
axpy_gpu(n, (1-B1), d, 1, m, 1);
mul_gpu(n, d, 1, d, 1);
axpy_gpu(n, (1-B2), d, 1, v, 1);
adam_gpu(n, w, m, v, B1, B2, rate, eps, t);
fill_gpu(n, 0, d, 1);
}
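/* For reference, the BLAS call sequence above implements the per-element Adam
update (a sketch of the math, not part of the original source):
   d <- d - decay*batch*w                  (axpy_gpu: weight decay folded into d)
   m <- B1*m + (1-B1)*d                    (scal_gpu + axpy_gpu)
   v <- B2*v + (1-B2)*d*d                  (scal_gpu + mul_gpu + axpy_gpu)
   w <- w + rate * m_hat / (sqrt(v_hat) + eps)
with bias-corrected m_hat = m/(1-B1^t) and v_hat = v/(1-B2^t) computed in
adam_kernel, after which d is zeroed by fill_gpu. */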
__global__ void normalize_kernel(int N, float *x, float *mean, float *variance, int batch, int filters, int spatial)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= N) return;
int f = (index/spatial)%filters;
x[index] = (x[index] - mean[f])/(sqrtf(variance[f] + .00001f));
}
__global__ void normalize_delta_kernel(int N, float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= N) return;
int f = (index/spatial)%filters;
delta[index] = delta[index] * 1.f/(sqrtf(variance[f] + .00001f)) + variance_delta[f] * 2.f * (x[index] - mean[f]) / (spatial * batch) + mean_delta[f]/(spatial*batch);
}
extern "C" void normalize_delta_gpu(float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta)
{
size_t N = batch*filters*spatial;
normalize_delta_kernel<<<cuda_gridsize(N), BLOCK>>>(N, x, mean, variance, mean_delta, variance_delta, batch, filters, spatial, delta);
check_error(cudaPeekAtLastError());
}
__global__ void variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= filters) return;
int j,k;
variance_delta[i] = 0;
for(j = 0; j < batch; ++j){
for(k = 0; k < spatial; ++k){
int index = j*filters*spatial + i*spatial + k;
variance_delta[i] += delta[index]*(x[index] - mean[i]);
}
}
variance_delta[i] *= -.5f * powf(variance[i] + .00001f, (float)(-3.f/2.f));
}
__global__ void accumulate_kernel(float *x, int n, int groups, float *sum)
{
int k;
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= groups) return;
sum[i] = 0;
for(k = 0; k < n; ++k){
sum[i] += x[k*groups + i];
}
}
__global__ void fast_mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
{
const int threads = BLOCK;
__shared__ float local[threads];
int id = threadIdx.x;
local[id] = 0;
int filter = blockIdx.x;
int i, j;
for(j = 0; j < batch; ++j){
for(i = 0; i < spatial; i += threads){
int index = j*spatial*filters + filter*spatial + i + id;
local[id] += (i+id < spatial) ? delta[index] : 0;
}
}
__syncthreads();
if(id == 0){
mean_delta[filter] = 0;
for(i = 0; i < threads; ++i){
mean_delta[filter] += local[i];
}
mean_delta[filter] *= (-1.f/sqrtf(variance[filter] + .00001f));
}
}
__global__ void fast_variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta)
{
const int threads = BLOCK;
__shared__ float local[threads];
int id = threadIdx.x;
local[id] = 0;
int filter = blockIdx.x;
int i, j;
for(j = 0; j < batch; ++j){
for(i = 0; i < spatial; i += threads){
int index = j*spatial*filters + filter*spatial + i + id;
local[id] += (i+id < spatial) ? delta[index]*(x[index] - mean[filter]) : 0;
}
}
__syncthreads();
if(id == 0){
variance_delta[filter] = 0;
for(i = 0; i < threads; ++i){
variance_delta[filter] += local[i];
}
variance_delta[filter] *= -.5f * powf(variance[filter] + .00001f, (float)(-3.f/2.f));
}
}
__global__ void mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= filters) return;
int j,k;
mean_delta[i] = 0;
for (j = 0; j < batch; ++j) {
for (k = 0; k < spatial; ++k) {
int index = j*filters*spatial + i*spatial + k;
mean_delta[i] += delta[index];
}
}
mean_delta[i] *= (-1.f/sqrtf(variance[i] + .00001f));
}
extern "C" void mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
{
mean_delta_kernel<<<cuda_gridsize(filters), BLOCK>>>(delta, variance, batch, filters, spatial, mean_delta);
check_error(cudaPeekAtLastError());
}
extern "C" void fast_mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
{
fast_mean_delta_kernel<<<filters, BLOCK>>>(delta, variance, batch, filters, spatial, mean_delta);
check_error(cudaPeekAtLastError());
}
extern "C" void fast_variance_delta_gpu(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta)
{
fast_variance_delta_kernel<<<filters, BLOCK>>>(x, delta, mean, variance, batch, filters, spatial, variance_delta);
check_error(cudaPeekAtLastError());
}
__global__ void mean_kernel(float *x, int batch, int filters, int spatial, float *mean)
{
float scale = 1.f/(batch * spatial);
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= filters) return;
int j,k;
mean[i] = 0;
for(j = 0; j < batch; ++j){
for(k = 0; k < spatial; ++k){
int index = j*filters*spatial + i*spatial + k;
mean[i] += x[index];
}
}
mean[i] *= scale;
}
__global__ void variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
float scale = 1.f/(batch * spatial - 1);
int j,k;
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= filters) return;
variance[i] = 0;
for(j = 0; j < batch; ++j){
for(k = 0; k < spatial; ++k){
int index = j*filters*spatial + i*spatial + k;
variance[i] += powf((x[index] - mean[i]), 2);
}
}
variance[i] *= scale;
}
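/* The 1/(batch*spatial - 1) scale is Bessel's correction: variance_kernel
computes the unbiased sample variance rather than the population variance. */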
__global__ void reorg_kernel(int N, float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i >= N) return;
int in_index = i;
int in_w = i%w;
i = i/w;
int in_h = i%h;
i = i/h;
int in_c = i%c;
i = i/c;
int b = i%batch;
int out_c = c/(stride*stride);
int c2 = in_c % out_c;
int offset = in_c / out_c;
int w2 = in_w*stride + offset % stride;
int h2 = in_h*stride + offset / stride;
//printf("%d\n", offset);
int out_index = w2 + w*stride*(h2 + h*stride*(c2 + out_c*b));
// printf("%d %d %d\n", w2, h2, c2);
//printf("%d %d\n", in_index, out_index);
//if(out_index >= N || out_index < 0) printf("bad bad bad \n");
if(forward) out[out_index] = x[in_index];
else out[in_index] = x[out_index];
//if(forward) out[1] = x[1];
//else out[0] = x[0];
}
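/* Worked example (illustrative numbers): with stride=2 and c=4 input channels,
out_c = c/(stride*stride) = 1, so for input channel in_c we get offset = in_c,
c2 = 0, and the element lands at (w2, h2) = (in_w*2 + in_c%2, in_h*2 + in_c/2):
the four coarse channels are re-tiled into one plane of twice the resolution. */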
__global__ void axpy_kernel(int N, float ALPHA, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) Y[OFFY+i*INCY] += ALPHA*X[OFFX+i*INCX];
}
__global__ void pow_kernel(int N, float ALPHA, float *X, int INCX, float *Y, int INCY)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) Y[i*INCY] = pow(X[i*INCX], ALPHA);
}
__global__ void const_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) X[i*INCX] = ALPHA;
}
__global__ void constrain_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) X[i*INCX] = fminf(ALPHA, fmaxf(-ALPHA, X[i*INCX]));
}
__global__ void supp_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) {
if((X[i*INCX] * X[i*INCX]) < (ALPHA * ALPHA)) X[i*INCX] = 0;
}
}
__global__ void add_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) X[i*INCX] += ALPHA;
}
__global__ void scal_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) X[i*INCX] *= ALPHA;
}
__global__ void fill_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) X[i*INCX] = ALPHA;
}
__global__ void copy_kernel(int N, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) Y[i*INCY + OFFY] = X[i*INCX + OFFX];
}
__global__ void mul_kernel(int N, float *X, int INCX, float *Y, int INCY)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) Y[i*INCY] *= X[i*INCX];
}
extern "C" void normalize_gpu(float *x, float *mean, float *variance, int batch, int filters, int spatial)
{
size_t N = batch*filters*spatial;
normalize_kernel<<<cuda_gridsize(N), BLOCK>>>(N, x, mean, variance, batch, filters, spatial);
check_error(cudaPeekAtLastError());
}
__global__ void fast_mean_kernel(float *x, int batch, int filters, int spatial, float *mean)
{
const int threads = BLOCK;
__shared__ float local[threads];
int id = threadIdx.x;
local[id] = 0;
int filter = blockIdx.x;
int i, j;
for(j = 0; j < batch; ++j){
for(i = 0; i < spatial; i += threads){
int index = j*spatial*filters + filter*spatial + i + id;
local[id] += (i+id < spatial) ? x[index] : 0;
}
}
__syncthreads();
if(id == 0){
mean[filter] = 0;
for(i = 0; i < threads; ++i){
mean[filter] += local[i];
}
mean[filter] /= spatial * batch;
}
}
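/* Two-phase block reduction: each thread strides over the spatial extent,
accumulating a partial sum in shared local[BLOCK]; after __syncthreads(),
thread 0 serially folds the BLOCK partials into the per-filter mean. */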
__global__ void fast_variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
const int threads = BLOCK;
__shared__ float local[threads];
int id = threadIdx.x;
local[id] = 0;
int filter = blockIdx.x;
int i, j;
for(j = 0; j < batch; ++j){
for(i = 0; i < spatial; i += threads){
int index = j*spatial*filters + filter*spatial + i + id;
local[id] += (i+id < spatial) ? powf((x[index] - mean[filter]), 2) : 0;
}
}
__syncthreads();
if(id == 0){
variance[filter] = 0;
for(i = 0; i < threads; ++i){
variance[filter] += local[i];
}
variance[filter] /= (spatial * batch - 1);
}
}
extern "C" void fast_mean_gpu(float *x, int batch, int filters, int spatial, float *mean)
{
fast_mean_kernel<<<filters, BLOCK>>>(x, batch, filters, spatial, mean);
check_error(cudaPeekAtLastError());
}
extern "C" void fast_variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
fast_variance_kernel<<<filters, BLOCK>>>(x, mean, batch, filters, spatial, variance);
check_error(cudaPeekAtLastError());
}
extern "C" void mean_gpu(float *x, int batch, int filters, int spatial, float *mean)
{
mean_kernel<<<cuda_gridsize(filters), BLOCK>>>(x, batch, filters, spatial, mean);
check_error(cudaPeekAtLastError());
}
extern "C" void variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
variance_kernel<<<cuda_gridsize(filters), BLOCK>>>(x, mean, batch, filters, spatial, variance);
check_error(cudaPeekAtLastError());
}
extern "C" void axpy_gpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY)
{
axpy_gpu_offset(N, ALPHA, X, 0, INCX, Y, 0, INCY);
}
extern "C" void pow_gpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY)
{
pow_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX, Y, INCY);
check_error(cudaPeekAtLastError());
}
extern "C" void axpy_gpu_offset(int N, float ALPHA, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY)
{
axpy_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, OFFX, INCX, Y, OFFY, INCY);
check_error(cudaPeekAtLastError());
}
extern "C" void copy_gpu(int N, float * X, int INCX, float * Y, int INCY)
{
copy_gpu_offset(N, X, 0, INCX, Y, 0, INCY);
}
extern "C" void mul_gpu(int N, float * X, int INCX, float * Y, int INCY)
{
mul_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, INCX, Y, INCY);
check_error(cudaPeekAtLastError());
}
extern "C" void copy_gpu_offset(int N, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY)
{
copy_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, OFFX, INCX, Y, OFFY, INCY);
check_error(cudaPeekAtLastError());
}
__global__ void flatten_kernel(int N, float *x, int spatial, int layers, int batch, int forward, float *out)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i >= N) return;
int in_s = i%spatial;
i = i/spatial;
int in_c = i%layers;
i = i/layers;
int b = i;
int i1 = b*layers*spatial + in_c*spatial + in_s;
int i2 = b*layers*spatial + in_s*layers + in_c;
if (forward) out[i2] = x[i1];
else out[i1] = x[i2];
}
extern "C" void flatten_gpu(float *x, int spatial, int layers, int batch, int forward, float *out)
{
int size = spatial*batch*layers;
flatten_kernel<<<cuda_gridsize(size), BLOCK>>>(size, x, spatial, layers, batch, forward, out);
check_error(cudaPeekAtLastError());
}
extern "C" void reorg_gpu(float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
{
int size = w*h*c*batch;
reorg_kernel<<<cuda_gridsize(size), BLOCK>>>(size, x, w, h, c, batch, stride, forward, out);
check_error(cudaPeekAtLastError());
}
__global__ void mask_kernel(int n, float *x, float mask_num, float *mask, float val)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n && mask[i] == mask_num) x[i] = val;
}
extern "C" void mask_gpu(int N, float * X, float mask_num, float * mask, float val)
{
mask_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, mask_num, mask, val);
check_error(cudaPeekAtLastError());
}
__global__ void scale_mask_kernel(int n, float *x, float mask_num, float *mask, float scale)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n && mask[i] == mask_num) x[i] *= scale;
}
extern "C" void scale_mask_gpu(int N, float * X, float mask_num, float * mask, float scale)
{
scale_mask_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, mask_num, mask, scale);
check_error(cudaPeekAtLastError());
}
extern "C" void const_gpu(int N, float ALPHA, float * X, int INCX)
{
const_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX);
check_error(cudaPeekAtLastError());
}
extern "C" void constrain_gpu(int N, float ALPHA, float * X, int INCX)
{
constrain_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX);
check_error(cudaPeekAtLastError());
}
extern "C" void add_gpu(int N, float ALPHA, float * X, int INCX)
{
add_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX);
check_error(cudaPeekAtLastError());
}
extern "C" void scal_gpu(int N, float ALPHA, float * X, int INCX)
{
scal_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX);
check_error(cudaPeekAtLastError());
}
extern "C" void fill_gpu(int N, float ALPHA, float * X, int INCX)
{
fill_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX);
check_error(cudaPeekAtLastError());
}
__global__ void softmax_x_ent_kernel(int n, float *pred, float *truth, float *delta, float *error)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n){
float t = truth[i];
float p = pred[i];
error[i] = (t) ? -log(p) : 0;
delta[i] = t-p;
}
}
extern "C" void softmax_x_ent_gpu(int n, float *pred, float *truth, float *delta, float *error)
{
softmax_x_ent_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error);
check_error(cudaPeekAtLastError());
}
__device__ void softmax_device(float *input, int n, float temp, int stride, float *output)
{
int i;
float sum = 0;
float largest = -INFINITY;
for(i = 0; i < n; ++i){
float val = input[i*stride]; // float, not int: avoid truncating the running max
largest = (val>largest) ? val : largest;
}
for(i = 0; i < n; ++i){
float e = expf(input[i*stride]/temp - largest/temp);
sum += e;
output[i*stride] = e;
}
for(i = 0; i < n; ++i){
output[i*stride] /= sum;
}
}
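/* Numerical-stability note: subtracting largest/temp inside expf makes the
biggest exponent e^0 = 1, preventing overflow for large logits; the shift
cancels in the normalization, so the result equals expf(input/temp)/sum. */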
__global__ void softmax_kernel(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= batch*groups) return;
int b = id / groups;
int g = id % groups;
softmax_device(input + b*batch_offset + g*group_offset, n, temp, stride, output + b*batch_offset + g*group_offset);
}
extern "C" void softmax_gpu(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output)
{
softmax_kernel<<<cuda_gridsize(batch*groups), BLOCK>>>(input, n, batch, batch_offset, groups, group_offset, stride, temp, output);
check_error(cudaPeekAtLastError());
}
|
e9699ae50abba886eb754cd5af5ea93225b451f3.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* BSD 2-Clause License
*
* Copyright (c) 2020, Alessandro Capotondi
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file saxpy.c
* @author Alessandro Capotondi
* @date 12 May 2020
* @brief Saxpy
*
* @see https://dolly.fim.unimore.it/2019/course/view.php?id=152
*/
#include <assert.h>
#include <time.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime.h>
#define gpuErrchk(ans) \
{ \
gpuAssert((ans), __FILE__, __LINE__); \
}
static inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true)
{
if (code != hipSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort)
exit(code);
}
}
extern "C"
{
#include "utils.h"
}
#define TWO02 (1 << 2)
#define TWO04 (1 << 4)
#define TWO08 (1 << 8)
#ifndef N
#define N (1 << 27)
#endif
#ifndef BLOCK_SIZE
#define BLOCK_SIZE (512)
#endif
/*
*SAXPY (host implementation)
* y := a * x + y
*/
void host_saxpy(float *__restrict__ y, float a, float *__restrict__ x, int n)
{
#pragma omp parallel for simd schedule(simd : static)
for (int i = 0; i < n; i++)
{
y[i] = a * x[i] + y[i];
}
}
__global__ void gpu_saxpy(float *__restrict__ y, float a, float *__restrict__ x, int n)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
y[i] = a * x[i] + y[i];
}
int main(int argc, const char **argv)
{
int iret = 0;
int n = N;
float *h_x, *d_x;
float *h_y, *d_y;
float *h_z;
float a = 101.0f / TWO02,
b, c;
if (argc > 1)
n = atoi(argv[1]);
//TODO Update malloc to hipHostMalloc or hipMallocManaged (if necessary)
gpuErrchk(hipHostMalloc((void **)&h_x, sizeof(float) * n));
//TODO Update malloc to hipHostMalloc or hipMallocManaged (if necessary)
gpuErrchk(hipHostMalloc((void **)&h_y, sizeof(float) * n));
if (NULL == (h_z = (float *)malloc(sizeof(float) * n)))
{
printf("error: memory allocation for 'z'\n");
iret = -1;
}
if (0 != iret)
{
//TODO Update hipHostFree or hipFree (if necessary)
hipHostFree(h_x);
//TODO Update hipHostFree or hipFree (if necessary)
hipHostFree(h_y);
free(h_z);
exit(EXIT_FAILURE);
}
//Init Data
b = rand() % TWO04;
c = rand() % TWO08;
for (int i = 0; i < n; i++)
{
h_x[i] = b / (float)TWO02;
h_y[i] = h_z[i] = c / (float)TWO04;
}
//TODO Remove if unnecessary
gpuErrchk(hipMalloc((void **)&d_x, sizeof(float) * n));
gpuErrchk(hipMalloc((void **)&d_y, sizeof(float) * n));
start_timer();
//TODO Remove if unnecessary
gpuErrchk(hipMemcpy(d_x, h_x, sizeof(float) * n, hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(d_y, h_y, sizeof(float) * n, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( gpu_saxpy), dim3(((n + BLOCK_SIZE - 1) / BLOCK_SIZE)), dim3(BLOCK_SIZE), 0, 0, d_y, a, d_x, n);
gpuErrchk(hipPeekAtLastError());
//TODO Remove if unnecessary
gpuErrchk(hipMemcpy(h_y, d_y, sizeof(float) * n, hipMemcpyDeviceToHost));
stop_timer();
printf("saxpy (GPU): %9.3f sec %9.1f GFLOPS\n", elapsed_ns() / 1.0e9, 2 * n / ((float)elapsed_ns()));
//Check Mathematical Consistency
start_timer();
host_saxpy(h_z, a, h_x, n);
stop_timer();
printf("saxpy (Host): %9.3f sec %9.1f GFLOPS\n", elapsed_ns() / 1.0e9, 2 * n / ((float)elapsed_ns()));
for (int i = 0; i < n; ++i)
{
iret = *(int *)(h_y + i) ^ *(int *)(h_z + i);
assert(iret == 0);
}
//TODO Update hipHostFree or hipFree (if necessary)
gpuErrchk(hipHostFree((void *)h_x));
gpuErrchk(hipFree(d_x));
//TODO Update hipHostFree or hipFree (if necessary)
gpuErrchk(hipHostFree((void *)h_y));
gpuErrchk(hipFree(d_y));
free(h_z);
// CUDA exit -- needed to flush printf write buffer
hipDeviceReset();
return 0;
}
|
e9699ae50abba886eb754cd5af5ea93225b451f3.cu
|
/*
* BSD 2-Clause License
*
* Copyright (c) 2020, Alessandro Capotondi
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file saxpy.c
* @author Alessandro Capotondi
* @date 12 May 2020
* @brief Saxpy
*
* @see https://dolly.fim.unimore.it/2019/course/view.php?id=152
*/
#include <assert.h>
#include <time.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda_runtime.h>
#define gpuErrchk(ans) \
{ \
gpuAssert((ans), __FILE__, __LINE__); \
}
static inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort)
exit(code);
}
}
extern "C"
{
#include "utils.h"
}
#define TWO02 (1 << 2)
#define TWO04 (1 << 4)
#define TWO08 (1 << 8)
#ifndef N
#define N (1 << 27)
#endif
#ifndef BLOCK_SIZE
#define BLOCK_SIZE (512)
#endif
/*
*SAXPY (host implementation)
* y := a * x + y
*/
void host_saxpy(float *__restrict__ y, float a, float *__restrict__ x, int n)
{
#pragma omp parallel for simd schedule(simd : static)
for (int i = 0; i < n; i++)
{
y[i] = a * x[i] + y[i];
}
}
__global__ void gpu_saxpy(float *__restrict__ y, float a, float *__restrict__ x, int n)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
y[i] = a * x[i] + y[i];
}
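/* The launch in main uses ceiling division, (n + BLOCK_SIZE - 1) / BLOCK_SIZE
blocks, so the final block may contain padding threads; the i < n guard above
turns those threads into no-ops. */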
int main(int argc, const char **argv)
{
int iret = 0;
int n = N;
float *h_x, *d_x;
float *h_y, *d_y;
float *h_z;
float a = 101.0f / TWO02,
b, c;
if (argc > 1)
n = atoi(argv[1]);
//TODO Update malloc to cudaMallocHost or cudaMallocManaged (if necessary)
gpuErrchk(cudaMallocHost((void **)&h_x, sizeof(float) * n));
//TODO Update malloc to cudaMallocHost or cudaMallocManaged (if necessary)
gpuErrchk(cudaMallocHost((void **)&h_y, sizeof(float) * n));
if (NULL == (h_z = (float *)malloc(sizeof(float) * n)))
{
printf("error: memory allocation for 'z'\n");
iret = -1;
}
if (0 != iret)
{
//TODO Update cudaFreeHost or cudaFree (if necessary)
cudaFreeHost(h_x);
//TODO Update cudaFreeHost or cudaFree (if necessary)
cudaFreeHost(h_y);
free(h_z);
exit(EXIT_FAILURE);
}
//Init Data
b = rand() % TWO04;
c = rand() % TWO08;
for (int i = 0; i < n; i++)
{
h_x[i] = b / (float)TWO02;
h_y[i] = h_z[i] = c / (float)TWO04;
}
//TODO Remove if unnecessary
gpuErrchk(cudaMalloc((void **)&d_x, sizeof(float) * n));
gpuErrchk(cudaMalloc((void **)&d_y, sizeof(float) * n));
start_timer();
//TODO Remove if unnecessary
gpuErrchk(cudaMemcpy(d_x, h_x, sizeof(float) * n, cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_y, h_y, sizeof(float) * n, cudaMemcpyHostToDevice));
gpu_saxpy<<<((n + BLOCK_SIZE - 1) / BLOCK_SIZE), BLOCK_SIZE>>>(d_y, a, d_x, n);
gpuErrchk(cudaPeekAtLastError());
//TODO Remove if unnecessary
gpuErrchk(cudaMemcpy(h_y, d_y, sizeof(float) * n, cudaMemcpyDeviceToHost));
stop_timer();
printf("saxpy (GPU): %9.3f sec %9.1f GFLOPS\n", elapsed_ns() / 1.0e9, 2 * n / ((float)elapsed_ns()));
//Check Mathematical Consistency
start_timer();
host_saxpy(h_z, a, h_x, n);
stop_timer();
printf("saxpy (Host): %9.3f sec %9.1f GFLOPS\n", elapsed_ns() / 1.0e9, 2 * n / ((float)elapsed_ns()));
for (int i = 0; i < n; ++i)
{
iret = *(int *)(h_y + i) ^ *(int *)(h_z + i);
assert(iret == 0);
}
//TODO Update cudaFreeHost or cudaFree (if necessary)
gpuErrchk(cudaFreeHost((void *)h_x));
gpuErrchk(cudaFree(d_x));
//TODO Update cudaFreeHost or cudaFree (if necessary)
gpuErrchk(cudaFreeHost((void *)h_y));
gpuErrchk(cudaFree(d_y));
free(h_z);
// CUDA exit -- needed to flush printf write buffer
cudaDeviceReset();
return 0;
}
|
7696aa25237648ec5e3ce33b18cad50ca72a94d3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "../NativeOps.h"
#include <hip/hip_runtime.h>
#include <cuda_launch_config.h>
#include <buffer.h>
#include <helpers/shape.h>
#include "../Environment.h"
#include <helpers/TAD.h>
#include <ops/specials.h>
#include <loops/reduce3.h>
#include <loops/reduce.h>
#include <loops/indexreduce.h>
#include <loops/pairwise_transform.h>
#include <loops/transform.h>
#include <loops/scalar.h>
#include <loops/broadcasting.h>
#include <loops/summarystatsreduce.h>
#include <loops/random.h>
//#include <thread>
#include <map>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include <cuda_device_runtime_api.h>
#include <pointercast.h>
#include <stdio.h>
#include <stdlib.h>
#include <loops/type_conversions.h>
#include <op_boilerplate.h>
#include <loops/grid_shaped.h>
#include <loops/grid_strided.h>
#include <loops/aggregates.h>
#include <helpers/threshold.h>
#include <ShapeList.h>
#include <Context.h>
#include <ops/specials_cuda.h>
// FIXME: we need cuda-specific implementations
#include <helpers/logger.h>
#include <NDArray.h>
#include <NDArrayFactory.h>
#include <GraphExecutioner.h>
#include <graph/GraphHolder.h>
#include <graph/VariablesSet.h>
#include <ops/declarable/OpRegistrator.h>
#include <ops/declarable/CustomOperations.h>
//#include <sys/time.h>
// b40c only available for gcc :(
#ifdef __clang__
// do nothing
#elif __GNUC__
#include <b40c/util/error_utils.cuh>
#include <b40c/util/multiple_buffering.cuh>
#include <b40c/radix_sort/enactor.cuh>
#endif
#include <hiprand/hiprand.h>
#include <Status.h>
#include <helpers/DebugHelper.h>
using namespace nd4j;
#include <loops/special_kernels.h>
hipDeviceProp_t *deviceProperties;
hipFuncAttributes *funcAttributes = new hipFuncAttributes[64];
int blockLimit = 128;
int maxThreads = 512;
bool allowedP2P = false;
bool supportedP2P = false;
#ifdef __EXPERIMENTAL__
bool experimentalSupport = true;
#else
bool experimentalSupport = false;
#endif
int minThreads = 32;
__constant__ char deviceConstantMemory[49152];
typedef struct {
long streamId;
long callId;
} __syncInfo;
typedef __syncInfo SyncInfo;
// this method isn't used, left here for legacy and caution purposes
// TLDR: don't use this way, it sucks
void CUDART_CB syncCallback(hipStream_t stream, hipError_t status, void *data){
SyncInfo *sync = reinterpret_cast<SyncInfo *>(data);
printf("Finished stream: [%i], kernel call: [%i]\n", sync->streamId, sync->callId);
}
// this method just does type conversion in a fancy way
int getDeviceId(Nd4jPointer ptrToDeviceId) {
return (int)(Nd4jLong)ptrToDeviceId;
}
template <typename T>
dim3 getOptimalDimensions(Nd4jLong n,hipFuncAttributes attributes, hipDeviceProp_t properties) {
// we can combine the two to compute a block size
int num_threads = block_size_with_maximum_potential_occupancy(attributes, properties);
// no real sense launching more threads than the number of elements we have
if (num_threads > n) num_threads = n;
if (maxThreads > 0 && num_threads > maxThreads) num_threads = maxThreads;
// compute the number of blocks of size num_threads to launch
int num_blocks = n / num_threads;
// check for partial block at the end
if (num_blocks > blockLimit) num_blocks = blockLimit;
if (num_blocks < 4 && n > 128) {
num_blocks = 4;
num_threads = n / num_blocks;
}
if (num_threads >= 768) {
num_blocks = num_blocks * 2;
num_threads = num_threads / 2;
}
if(n % num_threads && num_blocks < blockLimit) ++num_blocks;
//(num_threads * sizeof(T)) + attributes.sharedSizeBytes);
return dim3(num_blocks,num_threads, 3000);
}
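/* Illustrative walk-through (hypothetical numbers): for n = 1<<20 with
num_threads capped at maxThreads = 512, num_blocks = n/512 = 2048 is clamped
to blockLimit = 128; 512 < 768 skips the halving branch, and the trailing
remainder check cannot add a block once the clamp has been reached. */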
int getBaseMemorySize(int xRank, hipFuncAttributes funcAttr) {
int memory_limit = 256; //funcAttr.sharedSizeBytes;
// TODO: remove this later
memory_limit += sizeof(UnifiedSharedMemory) + 32; // sizeof(shape::TAD) + (xRank * 4 * 4)
/*
if (xRank == 0) xRank = 2;
memory_limit += (xRank * 2 + 4) * 3 * 4; // we reserve memory for xShape + T1/T2 shapes
memory_limit += yRank == 0 ? 0 : (yRank * 2 + 4) * 4;
memory_limit += zRank == 0 ? 0 : (zRank * 2 + 4) * 4;
memory_limit += (xRank * 4) * 6;
memory_limit += MAX_RANK * 4; // special case, needed roughly in one pass
*/
return memory_limit;
}
/*
* Basic CUDA constants here: number of blocks per MP
*/
int getDeviceBlockThreshold(int deviceId) {
int ccMinor = deviceProperties[deviceId].minor;
int ccMajor = deviceProperties[deviceId].major;
int blockThreshold = 8;
if (ccMajor >= 5)
blockThreshold = 32;
else if (ccMajor == 3)
blockThreshold = 16;
else if (ccMajor < 3)
blockThreshold = 8;
return blockThreshold;
}
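// Example (hypothetical device, not queried anywhere in this file): a 28-SM
// Pascal GPU (CC 6.1) yields blockThreshold = 32, so the effective block limit
// used by the launch helpers below is countMP * blockThreshold = 28 * 32 = 896
// resident blocks.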
dim3 getBasicLaunchParams(int deviceId, long problemLength, int sharedMemoryPerThread, hipFuncAttributes funcAttr) {
int countMP = deviceProperties[deviceId].multiProcessorCount;
int blockThreshold = getDeviceBlockThreshold(deviceId);
int num_threads = problemLength / (countMP * blockThreshold);
num_threads = nd4j::math::nd4j_min<int>(num_threads, maxThreads);
num_threads = nd4j::math::nd4j_max<int>(num_threads, 64);
num_threads = nd4j::math::nd4j_max<int>(num_threads, minThreads);
int num_blocks = nd4j::math::nd4j_max<int>(problemLength / num_threads, 1);
num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit);
int memory_limit = (sharedMemoryPerThread * num_threads) + getBaseMemorySize(1, funcAttr);
dim3 launchDims = dim3(num_blocks, num_threads, memory_limit);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Preliminary basic launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i]\n", num_blocks, num_threads, memory_limit);
return launchDims;
}
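// Illustrative walkthrough (assumed device: 20 SMs, CC >= 5.0, so
// blockThreshold = 32): for problemLength = 1000000 and
// sharedMemoryPerThread = 16, num_threads = 1000000 / (20 * 32) = 1562,
// clamped to maxThreads = 512; num_blocks = 1000000 / 512 = 1953, clamped to
// blockLimit = 128. Shared memory is 16 * 512 = 8192 bytes plus the base size
// from getBaseMemorySize(), giving dim3(128, 512, 8192 + base).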
/*
* This method returns the shared memory threshold value. The default overflow ratio is 0.3:
* the raw per-SM budget is divided by 0.3 before being returned, and callers scale it back
* down by their expected number of resident blocks per SM.
*/
int getDeviceSharedThreshold(int deviceId) {
int ccMinor = deviceProperties[deviceId].minor;
int ccMajor = deviceProperties[deviceId].major;
// please note threshold isn't multiple of 32, and that's NOT a mistake
int shmemThreshold;
if (ccMajor == 6 && ccMinor == 0)
shmemThreshold = 65536;
else if (ccMajor == 6 && ccMinor == 1)
shmemThreshold = 49152;
else if (ccMajor == 5 && ccMinor == 2)
shmemThreshold = 98304;
else if (ccMajor == 5)
shmemThreshold = 65536;
else if (ccMajor == 3 && ccMinor == 7)
shmemThreshold = 114688;
else shmemThreshold = 49152;
return shmemThreshold / 0.3;
}
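// Example: on an assumed CC 6.0 device the per-SM budget is 65536 bytes, so
// this returns 65536 / 0.3 = 218453 - deliberately ~3.3x the physical limit,
// since callers divide the result by the expected number of resident blocks
// per SM before comparing it against per-block usage.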
dim3 getBetterDimensions(int deviceId, int numTads, int tadLength, int xRank, hipFuncAttributes funcAttr, int dimensionLength, int elementSize, int reduction) {
int num_threads = nd4j::math::nd4j_min<int>(tadLength, maxThreads);
int countMP = deviceProperties[deviceId].multiProcessorCount;
int regPerBlock = deviceProperties[deviceId].regsPerBlock;
int warpSize = deviceProperties[deviceId].warpSize;
int blockThreshold = getDeviceBlockThreshold(deviceId);
int shmemThreshold = getDeviceSharedThreshold(deviceId);
// round num_threads down to the nearest multiple of warpSize
num_threads -= num_threads % warpSize;
num_threads = nd4j::math::nd4j_max<int>(1, num_threads);
if (num_threads < warpSize && tadLength < warpSize)
num_threads = tadLength;
// since we use shared memory as fast memory for some cases - we need to count that in
int memory_limit = getBaseMemorySize(xRank, funcAttr);
int memory_floor = memory_limit;
int effective_block_limit = countMP * blockThreshold;
int num_blocks = numTads; //nd4j::math::nd4j_min<int>(numTads, effective_block_limit);
int desiredShared = shmemThreshold / nd4j::math::nd4j_max<int>((num_blocks / countMP), 1);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Launch context: numBlocks: [%i], numThreads: [%i], countMap: [%i], shmemThreshold: [%i], desiredShared: [%i], elementSize: [%i]\n", num_blocks, num_threads, countMP, shmemThreshold, desiredShared, elementSize);
// at this point we've gathered all the required information; time to factor in the reduction multipliers
int reduction_per_block = 0;
bool found = false;
if (reduction > 0)
while (!found) {
reduction_per_block = (num_threads * elementSize * reduction);
if (memory_limit + reduction_per_block < desiredShared) {
memory_limit += reduction_per_block;
found = true;
} else {
if (num_threads > minThreads) {
num_threads -= 32;
} else {
memory_limit += reduction_per_block;
found = true;
}
}
}
// at this point we know the total memory used per block, and the per-SM limit
int max_active_blocks = shmemThreshold / nd4j::math::nd4j_max<int>(memory_limit, 1);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("MAB: [%i], memory_floor: [%i], memory_limit: [%i], reductionPerBlock: [%i]\n", max_active_blocks, memory_floor, memory_limit, reduction_per_block);
// we don't want to spawn more blocks than the GPU can actually handle without queueing
//num_blocks = nd4j::math::nd4j_min<int>(num_blocks, max_active_blocks);
num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit);
// if (num_blocks > countMP)
// num_blocks = num_blocks - (num_blocks % countMP);
num_blocks = nd4j::math::nd4j_max<int>(num_blocks, 1);
int targetBlocksPerMP = num_blocks / countMP;
// now we know the desired number of blocks wrt shared memory; next, take into account the thread-per-SM limit
if (targetBlocksPerMP * num_threads > 2048) {
while (targetBlocksPerMP * num_threads > 2048) {
if (num_threads <= minThreads)
break;
num_threads -= 32;
}
reduction_per_block = (num_threads * elementSize * reduction);
memory_limit = memory_floor + reduction_per_block;
}
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Preliminary reduce launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i], reduction_per_block: [%i], blocksPerMP: [%i]\n", num_blocks, num_threads, memory_limit, reduction_per_block, targetBlocksPerMP);
return dim3(num_blocks,num_threads, memory_limit);
}
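// Illustrative trace (all device numbers assumed): numTads = 64,
// tadLength = 1024, elementSize = 8, reduction = 2 on a 20-SM CC 6.1 device.
// num_threads clamps to maxThreads = 512 (already warp-aligned);
// desiredShared = (49152 / 0.3) / max(64 / 20, 1) = 163840 / 3 = 54613;
// reduction_per_block = 512 * 8 * 2 = 8192 fits on the first try, so
// memory_limit = base + 8192. num_blocks = min(64, blockLimit) = 64, and
// (64 / 20) * 512 = 1536 <= 2048 threads per SM, so nothing shrinks:
// result is dim3(64, 512, base + 8192).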
/*
* This method returns kernel launch param for linear memory access
*/
dim3 getFlatLaunchParams(int deviceId, Nd4jLong *xShapeInfo, Nd4jLong *yShapeInfo, hipFuncAttributes funcAttr) {
auto xRank = shape::rank(xShapeInfo);
auto yRank = yShapeInfo == nullptr ? 0 : shape::rank(yShapeInfo);
auto zRank = 0;
int memory_limit = getBaseMemorySize(xRank, funcAttr);
int countMP = deviceProperties[deviceId].multiProcessorCount;
int regPerBlock = deviceProperties[deviceId].regsPerBlock;
int blockThreshold = getDeviceBlockThreshold(deviceId);
int shmemThreshold = getDeviceSharedThreshold(deviceId);
auto xLength = shape::length(xShapeInfo);
int effective_block_limit = countMP * blockThreshold;
// for flat calls we just want as many concurrent blocks as possible; we're not tied to TADs here
int num_threads = xLength / effective_block_limit;
if (num_threads < minThreads)
num_threads = minThreads;
num_threads = num_threads - (num_threads % 32);
int memory_floor = memory_limit;
int num_blocks = xLength / num_threads;
num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit);
// num_blocks = nd4j::math::nd4j_min<int>(num_blocks, effective_block_limit);
num_blocks = nd4j::math::nd4j_max<int>(num_blocks, 1);
int targetBlocksPerMP = num_blocks / countMP;
// now we know the desired number of blocks wrt shared memory; next, take into account the thread-per-SM limit
if (targetBlocksPerMP * num_threads > 2048 && num_threads >= 128) {
while (targetBlocksPerMP * num_threads > 2048) {
if (num_threads <= minThreads)
break;
num_threads -= 32;
}
}
if (xLength / num_threads > blockLimit)
num_blocks *= 2;
dim3 launchDims = dim3(num_blocks, num_threads, memory_limit);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Preliminary scalar launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i], blocksPerMP: [%i], problemLength: [%i], effectiveBlockLimit: [%i]\n", num_blocks, num_threads, memory_limit, targetBlocksPerMP, xLength, effective_block_limit);
return launchDims;
}
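// Illustrative trace (assumed device: 20 SMs, blockThreshold = 32, so
// effective_block_limit = 640): for xLength = 256000, num_threads =
// 256000 / 640 = 400, warp-aligned down to 384; num_blocks = 256000 / 384 =
// 666, clamped to blockLimit = 128. targetBlocksPerMP = 128 / 20 = 6 and
// 6 * 384 = 2304 > 2048, so threads shrink to 320 (6 * 320 = 1920). Finally
// 256000 / 320 = 800 > blockLimit, so num_blocks doubles to 256, giving
// dim3(256, 320, base memory). Note the final doubling can exceed blockLimit.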
/**
* This method returns kernel launch params with TAD-based memory access
*
* @param deviceId
* @param xShapeInfo
* @param tadShapeInfo
* @param funcAttr
* @param dimensionLength
* @param elementSize
* @param reductionSize
* @return
*/
dim3 getReduceLaunchParams(int deviceId, Nd4jLong *xShapeInfo, Nd4jLong *tadShapeInfo, hipFuncAttributes funcAttr, int dimensionLength, int elementSize, int reductionSize) {
Nd4jLong tadLength = 0;
Nd4jLong numTads = 0;
if (tadShapeInfo != nullptr) {
tadLength = shape::length(tadShapeInfo);
numTads = shape::length(xShapeInfo) / tadLength;
if (tadLength == 1) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("A xLength: [%i], zLength: [%i]\n", shape::length(xShapeInfo), shape::length(tadShapeInfo));
}
} else{
// we have special case - reduction along all dimensions
tadLength = nd4j::math::nd4j_min<int>(shape::length(xShapeInfo), 768);
numTads = shape::length(xShapeInfo) / tadLength;
}
auto xRank = shape::rank(xShapeInfo);
int zRank = tadShapeInfo == nullptr ? 0 : shape::rank(tadShapeInfo);
dim3 launchDims = getBetterDimensions(deviceId, numTads, tadLength, xRank, funcAttr, dimensionLength, elementSize, reductionSize);
if (nd4j::Environment::getInstance()->isDebugAndVerbose()) { //|| launchDims.x == 1
printf("Reduce LaunchParams: xLength: [%i], numTads: [%i], tadLength: [%i], launchDims.x: [%i], launchDims.y: [%i], launchDims.z: [%i]\n", shape::length(xShapeInfo), numTads, tadLength, launchDims.x, launchDims.y, launchDims.z);
}
return launchDims;
}
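// Example of the TAD split this helper performs (shapes are illustrative, not
// taken from any caller): reducing a [100, 100] array along dimension 1 yields
// tadLength = 100 and numTads = 100; for a full reduction
// (tadShapeInfo == nullptr) over 10000 elements, tadLength is capped at 768
// and numTads = 10000 / 768 = 13. Those two numbers then drive
// getBetterDimensions() above.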
/**
* Returns optimal launch parameters
* given the extra pointers passed in.
* The extra pointer should be
* the host pointer for the shape information
* associated with the data.
* From there it is used to obtain the length
* from which we can derive the optimal launch parameters.
*
*/
template <typename T>
dim3 getOptimalLaunchParameters(Nd4jPointer *extraPointers, hipFuncAttributes attributes, hipDeviceProp_t properties) {
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto n = shape::length(hostXShapeInfo);
dim3 launchDims = getOptimalDimensions<T>(n,attributes, properties);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Params: gridSize: [%i], blockSize: [%i], shMem: [%i], problemLength: [%i], totalThreads:[%i]\n", launchDims.x, launchDims.y, launchDims.z, n, (launchDims.x * launchDims.y));
return launchDims;
}
nd4j::buffer::Buffer<Nd4jLong> * createScalarBuffer(hipStream_t stream) {
Nd4jLong *scalarShapeInfo = shape::createScalarShapeInfo();
nd4j::buffer::Buffer<Nd4jLong> *buff = nd4j::buffer::createBuffer(scalarShapeInfo,shape::shapeInfoLength(2), stream);
nd4j::buffer::copyDataToGpu(&buff, stream);
return buff;
}
class ScalarShapeInformation {
private:
nd4j::buffer::Buffer<Nd4jLong> *scalarDimension;
nd4j::buffer::Buffer<Nd4jLong> *scalarShapeInfo;
// std::thread::id threadId;
public:
ScalarShapeInformation(hipStream_t stream) {
auto scalarDimensionBuff = reinterpret_cast<Nd4jLong *>(malloc(sizeof(Nd4jLong)));
CHECK_ALLOC(scalarDimensionBuff, "Failed to allocate ShapeInfoBuffer");
scalarDimensionBuff[0] = MAX_DIMENSION;
scalarDimension = nd4j::buffer::createBuffer(scalarDimensionBuff,1, stream);
scalarShapeInfo = createScalarBuffer(stream);
// threadId = std::this_thread::get_id();
}
~ScalarShapeInformation() {
nd4j::buffer::freeBuffer(&scalarShapeInfo);
nd4j::buffer::freeBuffer(&scalarDimension);
}
Nd4jLong *getShapeInfoHostPointer() {
return scalarShapeInfo->data;
}
Nd4jLong * getShapeInfoGpuPointer() {
return scalarShapeInfo->gData;
}
Nd4jLong * getDimensionHostPointer() {
return scalarDimension->data;
}
Nd4jLong * getDimensionGpuPointer() {
return scalarDimension->gData;
}
};
template <typename T>
class ScalarInfo {
nd4j::buffer::Buffer<T> *scalarData;
ScalarShapeInformation *shapeInfo;
T finalResult;
hipStream_t streamRef;
public:
ScalarInfo(hipStream_t stream) {
T *scalarResult = reinterpret_cast<T*>(malloc(sizeof(T)));
CHECK_ALLOC(scalarResult, "Failed to allocate new scalar buffer");
shapeInfo = new ScalarShapeInformation(stream);
scalarData = nd4j::buffer::createBuffer(scalarResult,1, stream);
streamRef = stream;
nd4j::buffer::copyDataToGpu(&scalarData, stream);
}
T getFinalResultFromDevice() {
nd4j::buffer::copyDataFromGpu(&scalarData, streamRef);
return scalarData->data[0];
}
/**
* Get the device shape information
* representing a scalar
*/
Nd4jLong *getDeviceShapeInfo() {
return shapeInfo->getShapeInfoGpuPointer();
}
/**
* Get the result pointers
*/
T *getDevicePointer() {
return scalarData->gData;
}
/**
* Get the infinite dimension device pointer
*/
Nd4jLong *getDimensionDevicePointer() {
return shapeInfo->getDimensionGpuPointer();
}
~ScalarInfo() {
nd4j::buffer::freeBuffer(&scalarData);
delete shapeInfo;
}
};
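// The extraPointers convention used by the exec* methods below, as inferred
// from the casts in this file (documentation of observed usage, not a
// guaranteed ABI):
// [0] host X shape info [1] stream handle (read via &extraPointers[1])
// [2] device id [3] device allocation scratch (int *)
// [4] device reduction scratch [5] host-visible scalar result pointer
// [6] special device buffer [7] host Y shape info
// [8] host Z shape info [9] host TAD shape info
// [10] device TAD shape info [11] device TAD offsets
// [12] device TAD shape info (Z or Y) [13] device TAD offsets (Z or Y)
// Slots 14+ are op-specific (see the IsMax path in execTransformDouble).
// A minimal host-side call sketch under that assumption (hypothetical
// buffers, not part of this translation unit; TAD slots left null for the
// scalar path, which takes the full-reduction branch):
//
// Nd4jPointer extras[14] = {};
// extras[0] = (Nd4jPointer) hostXShapeInfo;
// extras[1] = (Nd4jPointer) stream; // hipStream_t handle
// extras[2] = (Nd4jPointer) 0L; // device 0
// extras[3] = (Nd4jPointer) dAllocScratch;
// extras[4] = (Nd4jPointer) dReductionScratch;
// extras[5] = (Nd4jPointer) pinnedResult;
// double mx = ops->execIndexReduceScalarDouble(extras, 0, dX, dXShapeInfo, nullptr);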
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
*/
double NativeOps::execIndexReduceScalarDouble(Nd4jPointer *extraPointers,int opNum,
double *x,
Nd4jLong *xShapeInfo,
double *extraParams) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
Nd4jLong *hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
Nd4jLong *hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
Nd4jLong *deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
Nd4jLong *deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D1 opNum:[%i]\n", opNum);
double *resultPointer = reinterpret_cast<double *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[27], 1, sizeof(double), 3);
functions::indexreduce::IndexReduce<double>::executeIndexReduceScalar(launchDims, stream, opNum,
x,
xShapeInfo, shape::rank(hostXShapeInfo),
extraParams,
resultPointer,
nullptr, 0,
nullptr,
1,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
nd4j::DebugHelper::checkErrorCode(stream, "execIndexReduceScalarDouble(...) failed");
double result = resultPointer[0];
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execIndexReduceDouble(
Nd4jPointer *extraPointers,
int opNum,
double *x,
Nd4jLong *xShapeInfo,
double *extraParams,
double *result,
Nd4jLong *resultShapeInfo,
int *dimension, int dimensionLength) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
Nd4jLong *hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
Nd4jLong *hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
Nd4jLong *hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
Nd4jLong *deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
Nd4jLong *deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D2 opNum:[%i]\n", opNum);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[27], dimensionLength, sizeof(double), 3);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
functions::indexreduce::IndexReduce<double>::executeIndexReduce(launchDims, stream,
opNum,
x,
xShapeInfo, shape::rank(hostXShapeInfo),
extraParams,
result,
resultShapeInfo, shape::rank(hostZShapeInfo),
dimension,
dimensionLength,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execBroadcastDouble(Nd4jPointer *extraPointers,
int opNum,
double *x,
Nd4jLong *xShapeInfo,
double *y,
Nd4jLong *yShapeInfo,
double *result,
Nd4jLong *resultShapeInfo,
int *dimension, int dimensionLength){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostYShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[7]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
auto deviceTADShapeInfoZ = reinterpret_cast<Nd4jLong *>(extraPointers[12]);
auto deviceTADOffsetsZ = reinterpret_cast<Nd4jLong *>(extraPointers[13]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D3 opNum:[%i]\n", opNum);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[26], dimensionLength, sizeof(double), 2);
functions::broadcast::Broadcast<double>::executeBroadcast(launchDims, stream, opNum, x, xShapeInfo, y, yShapeInfo, result, resultShapeInfo, dimension, dimensionLength, deviceTADShapeInfo, deviceTADOffsets, deviceTADShapeInfoZ, deviceTADOffsetsZ);
}
/**
*
* @param opNum
* @param dx
* @param xStride
* @param y
* @param yStride
* @param result
* @param resultStride
* @param extraParams
* @param n
*/
void NativeOps::execPairwiseTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
double *dx,
Nd4jLong xStride,
double *y,
Nd4jLong yStride,
double *result,
Nd4jLong resultStride,
double *extraParams, Nd4jLong n) {
dim3 launchDims(512, 512, 2048);
functions::pairwise_transforms::PairWiseTransform<double>::execudaCudaStrided(launchDims, extraPointers, opNum, dx, xStride, y, yStride, result, resultStride, extraParams, n);
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
* @param xIndexes
* @param yIndexes
* @param resultIndexes
*/
void NativeOps::execPairwiseTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
double *dx,
Nd4jLong *xShapeInfo,
double *y,
Nd4jLong *yShapeInfo,
double *result,
Nd4jLong *resultShapeInfo,
double *extraParams,
Nd4jLong *xIndexes,
Nd4jLong *yIndexes,
Nd4jLong *resultIndexes) {
///
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execPairwiseTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
double *dx,
Nd4jLong *xShapeInfo,
double *y,
Nd4jLong *yShapeInfo,
double *result,
Nd4jLong *resultShapeInfo,
double *extraParams) {
dim3 launchDims(512, 512, 2048);
functions::pairwise_transforms::PairWiseTransform<double>::execudaCudaShaped(launchDims, extraPointers, opNum, dx, xShapeInfo, y, yShapeInfo, result, resultShapeInfo, extraParams);
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduceDouble(
Nd4jPointer *extraPointers,
int opNum,
double *x,
Nd4jLong *xShapeInfo,
double *extraParams,
double *result,
Nd4jLong *resultShapeInfo) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D7 opNum:[%i]\n", opNum);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
if (opNum == 19) {
execReduceDouble(extraPointers, 3, x, xShapeInfo, extraParams, result, resultShapeInfo);
}
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[22], 1, sizeof(double), 1);
// formerly a DISPATCH macro building IF/ELSE selectors; now a direct templated call
functions::reduce::ReduceFunction<double>::execReduceScalar(launchDims, stream, opNum, x, xShapeInfo, extraParams, result, resultShapeInfo, nullptr,1 , reductionPointer, deviceTADShapeInfo);
nd4j::DebugHelper::checkErrorCode(stream, "execReduceDouble(...) failed");
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduceDouble(
Nd4jPointer *extraPointers,
int opNum,
double *x,
Nd4jLong *xShapeInfo,
double *extraParams,
double *result,
Nd4jLong *resultShapeInfo,
int *dimension,
int dimensionLength) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D8 opNum:[%i]\n", opNum);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
if (opNum == 19) {
execReduceDouble(extraPointers, 3, x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength);
}
/**
* We have separate kernels, optimized for different numbers of dimensions, for reductions
*/
if (dimensionLength == 1) {
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[32], dimensionLength, sizeof(double), 2);
functions::reduce::ReduceFunction<double>::execReduceXD(launchDims, stream, opNum, 1, x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
} else if (shape::rank(hostTADShapeInfo) <= 3) {
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[33], dimensionLength, sizeof(double), 2);
functions::reduce::ReduceFunction<double>::execReduceXD(launchDims, stream, opNum, shape::rank(hostTADShapeInfo), x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
} else {
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[22], dimensionLength, sizeof(double), 2);
functions::reduce::ReduceFunction<double>::execReduceXD(launchDims, stream, opNum, shape::rank(hostTADShapeInfo), x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
}
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @return
*/
double NativeOps::execReduceScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
double *x,
Nd4jLong *xShapeInfo,
double *extraParams){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D9 opNum:[%i]\n", opNum);
double *resultPointer = reinterpret_cast<double *>(extraPointers[5]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
//dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[22], 1, sizeof(double), 1);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[22]);
// for LogSumExp (op 19) we need to know the max value first, and store it
if (opNum == 19) {
double tmp = execReduceScalarDouble(extraPointers, 3, x, xShapeInfo, extraParams);
// the max lands in resultPointer; pass it down as extraParams
extraParams = resultPointer;
}
// this macro builds a bunch of IF/ELSE selectors for kernel launch
//DISPATCH_SIMPLE(reduceScalarSimple, double, PARAMS(x, xShapeInfo, extraParams, resultPointer, nullptr, nullptr,1 , reductionPointer, deviceTADShapeInfo), OPS_A(REDUCE_OPS))
functions::reduce::ReduceFunction<double>::execReduceScalar(launchDims, stream, opNum, x, xShapeInfo, extraParams, resultPointer, nullptr, nullptr,1 , reductionPointer, deviceTADShapeInfo);
nd4j::DebugHelper::checkErrorCode(stream, "execReduceScalarDouble(...) failed");
double result = resultPointer[0];
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduce3Double(
Nd4jPointer *extraPointers,
int opNum,
double *x,
Nd4jLong *xShapeInfo,
double *extraParams,
double *y,
Nd4jLong *yShapeInfo,
double *result,
Nd4jLong *resultShapeInfo) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
auto yDeviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[12]);
auto yDeviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[13]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D10 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[21]);
hipLaunchKernelGGL(( reduce3Double), dim3(1),dim3(launchDims.y),launchDims.z, *stream,
opNum,
x,
xShapeInfo,
y,
yShapeInfo,
extraParams,
result,
resultShapeInfo,
nullptr,
1,
1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets);
DEBUG_KERNEL(stream, opNum);
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
*/
double NativeOps::execReduce3ScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
double *x,
Nd4jLong *xShapeInfo,
double *extraParams,
double *y,
Nd4jLong *yShapeInfo){
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D11 opNum:[%i]\n", opNum);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
auto resultPointer = reinterpret_cast<double *>(extraPointers[5]);
auto allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
auto yDeviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[12]);
auto yDeviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[13]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[21]);
hipLaunchKernelGGL(( reduce3ScalarDouble), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
x,
xShapeInfo,
y,
yShapeInfo,
extraParams,
resultPointer,
nullptr,
nullptr,
1,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets);
// since this method returns a scalar value, we must block on this call
nd4j::DebugHelper::checkErrorCode(stream, "execReduce3ScalarDouble(...) failed");
double result = resultPointer[0];
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execReduce3Double(
Nd4jPointer *extraPointers,
int opNum,
double *x,
Nd4jLong *xShapeInfo,
double *extraParams,
double *y,
Nd4jLong *yShapeInfo,
double *result,
Nd4jLong *resultShapeInfo,
int *dimension,
int dimensionLength){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D12 opNum:[%i]\n", opNum);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
auto yDeviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[12]);
Nd4jLong *yDeviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[13]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[21]);
hipLaunchKernelGGL(( reduce3Double), dim3(1),dim3(launchDims.y),launchDims.z, *stream,
opNum,
x,
xShapeInfo,
y,
yShapeInfo,
extraParams,
result,
resultShapeInfo,
dimension,
dimensionLength,
1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets);
DEBUG_KERNEL(stream, opNum);
}
/**
*
* @param opNum
* @param x
* @param xStride
* @param result
* @param resultStride
* @param scalar
* @param extraParams
* @param n
*/
void NativeOps::execScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
double *x,
Nd4jLong xStride,
double *result,
Nd4jLong resultStride,
double scalar,
double *extraParams,
Nd4jLong n) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[20]);
functions::scalar::ScalarTransform<double>::executeCudaStrided(launchDims, extraPointers, opNum, x, xStride, result, resultStride, scalar, extraParams, n);
DEBUG_KERNEL(stream, opNum);
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param scalar
* @param extraParams
* @param n
*/
void NativeOps::execScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
double *x,
Nd4jLong *xShapeInfo,
double *result,
Nd4jLong *resultShapeInfo,
double scalar,
double *extraParams){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[19]);
// this macro builds a bunch of IF/ELSE selectors for kernel launch
//DISPATCH_SIMPLE(scalarSimpleShaped, double, PARAMS(scalar, x, xShapeInfo, extraParams, result, resultShapeInfo, allocPointer), OPS_A(SCALAR_OPS))
functions::scalar::ScalarTransform<double>::executeCudaShaped(launchDims, extraPointers, opNum, x, xShapeInfo, result, resultShapeInfo, scalar, extraParams);
DEBUG_KERNEL(stream, opNum);
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param scalar
* @param extraParams
* @param n
* @param xIndexes
* @param resultIndexes
*/
void NativeOps::execScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
double *x,
Nd4jLong *xShapeInfo,
double *result,
Nd4jLong *resultShapeInfo,
double scalar,
double *extraParams,
Nd4jLong n,
Nd4jLong *xIndexes,
Nd4jLong *resultIndexes){
printf("Unsupported operation: scalarIndices\n");
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
*/
double NativeOps::execSummaryStatsScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
double *x,
Nd4jLong *xShapeInfo,
double *extraParams,bool biasCorrected){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
double *resultPointer = reinterpret_cast<double *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], 1, sizeof(double), 8);
launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x);
return functions::summarystats::SummaryStatsReduce<double>::execSummaryStatsReduceScalar(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, biasCorrected);
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execSummaryStatsDouble(
Nd4jPointer *extraPointers,
int opNum,
double *x,
Nd4jLong *xShapeInfo,
double *extraParams,
double *result,
Nd4jLong *resultShapeInfo,bool biasCorrected) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D17 opNum:[%i]\n", opNum);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], 1, sizeof(double), 8);
// we have to limit grid size here, due to limited nature of reduction/allocation pointers
launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x);
functions::summarystats::SummaryStatsReduce<double>::execSummaryStatsReduce(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, result, resultShapeInfo, biasCorrected);
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execSummaryStatsDouble(
Nd4jPointer *extraPointers,
int opNum,
double *x,
Nd4jLong *xShapeInfo,
double *extraParams,
double *result,
Nd4jLong *resultShapeInfo,
int *dimension, int dimensionLength,bool biasCorrected){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], dimensionLength, sizeof(double), 8);
// we're limiting maximum grid size for summaryStats ops
launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x);
functions::summarystats::SummaryStatsReduce<double>::execSummaryStatsReduce(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, biasCorrected);
}
/**
*
* @param opNum
* @param dx
* @param xStride
* @param result
* @param resultStride
* @param extraParams
* @param n
*/
void NativeOps::execTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
double *dx,
Nd4jLong xStride,
double *z,
Nd4jLong zStride,
double *extraParams,
Nd4jLong n) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D19 opNum:[%i]\n", opNum);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[16]);
functions::transform::Transform<double>::executeTransformStrided(launchDims, stream, opNum, n, dx, xStride, extraParams, z, zStride, allocPointer, reductionPointer);
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
double *dx,
Nd4jLong *xShapeInfo,
double *result,
Nd4jLong *resultShapeInfo,
double *extraParams){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D20 opNum:[%i]\n", opNum);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostYShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[7]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
int *maskedAllocPointer = allocPointer;
// special pointer for special buffer for special ops
double *specialPointer = reinterpret_cast<double *>(extraPointers[6]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[1]);
auto dimension = reinterpret_cast<int *>(specialPointer);
int *maxDimension = dimension + 1;
auto maxShapeBuffer = reinterpret_cast<Nd4jLong *>(maxDimension + 1);
double * special = reinterpret_cast<double *>(maxShapeBuffer + (MAX_RANK * 2 + 4));
auto devTadShapeInfo = reinterpret_cast<Nd4jLong *> (extraPointers[10]);
auto devTadOffsets = reinterpret_cast<Nd4jLong *> (extraPointers[11]);
/**
* ops between 38 and 41 are special ops:
* SoftMax, LogSoftMax, SoftMaxDerivative, IsMax
* On cuda we execute them as a chain of simpler ops (reduce, broadcast,
* transform), as laid out below.
*/
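// The blockwise branch below chains existing kernels into the numerically
// stable softmax along the implied last dimension:
// m = max(x) -> reduce op 3
// s = x - m -> broadcast op 1
// e = exp(s) -> transform op 3
// z = sum(e) -> reduce op 1
// y = e / z -> broadcast op 3
// then log(y) via transform op 5 for LogSoftMax, or transform op 42 for
// SoftMaxDerivative. Op numbers are read off the calls below, not from any
// canonical op table.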
// simple trick to work around reductions into a scalar
if (opNum >= 38 && opNum <= 41) {
if (shape::isVector(hostXShapeInfo) && opNum != 41) {
// if that's vector, we just go directly to op in 1 block
/*
* For vector cases of everything, but IsMax (41) we go for single-kernel calls
*/
int length = shape::length(hostXShapeInfo);
int block = nd4j::math::nd4j_min<int>(256, length);
launchDims.x = 1;
launchDims.y = block;
launchDims.z += (block * sizeof(double) * 4);
functions::transform::Transform<double>::executeTransformShaped(launchDims, stream, opNum, dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), allocPointer, reductionPointer, devTadShapeInfo, devTadOffsets);
} else {
// going for blockwise specials
// we'll do some pointer mangling here and execute the kernels one by one
auto shape = shape::shapeOf(hostXShapeInfo);
switch (opNum) {
case 40: // LogSoftMax
case 39: // SoftMax Derivative
case 38: {// softmax
Nd4jPointer tempPointers[16];
tempPointers[0] = extraPointers[0];
tempPointers[1] = extraPointers[1];
tempPointers[2] = extraPointers[2];
tempPointers[3] = extraPointers[3];
tempPointers[4] = extraPointers[4];
tempPointers[5] = extraPointers[5];
tempPointers[6] = extraPointers[6];
tempPointers[7] = extraPointers[7];
tempPointers[8] = extraPointers[8];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
tempPointers[12] = extraPointers[12];
tempPointers[13] = extraPointers[13];
tempPointers[14] = extraPointers[14];
tempPointers[15] = extraPointers[15];
Nd4jLong maxShape[2] = {shape::shapeOf(hostXShapeInfo)[0], 1};
auto hostMaxShapeBuffer = shape::shapeBuffer(2, maxShape);
tempPointers[7] = (Nd4jPointer) hostMaxShapeBuffer;
tempPointers[8] = (Nd4jPointer) hostMaxShapeBuffer;
// TODO: we could get rid of this one eventually
hipLaunchKernelGGL(( prepareShapeBuffer) , dim3(1), dim3(1), 128, *stream, dimension, maxDimension, maxShapeBuffer, shape[0]);
DEBUG_KERNEL(stream, opNum);
tempPointers[9] = extraPointers[12];
tempPointers[10] = extraPointers[13];
tempPointers[11] = extraPointers[14];
// max 3
execReduceDouble(tempPointers, 3, dx, xShapeInfo, extraParams, special, maxShapeBuffer, maxDimension, 1);
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
tempPointers[12] = extraPointers[10];
tempPointers[13] = extraPointers[11];
// sub 1
execBroadcastDouble(tempPointers, 1, dx, xShapeInfo, special,
maxShapeBuffer, result, resultShapeInfo, dimension, 1);
// exp 3
execTransformDouble(extraPointers, 3, result, resultShapeInfo, result, resultShapeInfo, extraParams);
tempPointers[8] = tempPointers[7];
tempPointers[9] = extraPointers[12];
tempPointers[10] = extraPointers[13];
tempPointers[11] = extraPointers[14];
//sum 1
execReduceDouble(tempPointers, 1, result, resultShapeInfo, extraParams, special,
maxShapeBuffer, maxDimension, 1);
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
tempPointers[12] = extraPointers[10];
tempPointers[13] = extraPointers[11];
// divide 3
execBroadcastDouble(tempPointers, 3, result, resultShapeInfo, special,
maxShapeBuffer, result, resultShapeInfo, dimension, 1);
// for LogSoftMax apply log (op 5); for SoftMaxDerivative apply op 42
if (opNum == 40)
execTransformDouble(extraPointers, 5, result, resultShapeInfo, result, resultShapeInfo, extraParams);
else if (opNum == 39)
execTransformDouble(extraPointers, 42, result, resultShapeInfo, result, resultShapeInfo, extraParams);
nd4j::DebugHelper::checkErrorCode(stream, "SoftMax failed failed");
delete hostMaxShapeBuffer;
break;
}
case 41: {
// IsMax along all dimensions
bool scalarCheat = false;
if (extraParams == nullptr) {
scalarCheat = true;
}
if (scalarCheat) {
/**
* In case of vector-input for IsMax, it just turns into IndexReduce call + further filler call
*/
int maxIdx = (int) execIndexReduceScalarDouble(extraPointers, 0, dx, xShapeInfo, extraParams);
int targetIdx = 0;
if (shape::order(hostXShapeInfo) == 'c' || (shape::order(hostXShapeInfo) == 'f' && maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1] >= shape::length(hostXShapeInfo)))
targetIdx = maxIdx;
else
targetIdx = maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1];
hipLaunchKernelGGL(( fillIsMaxDouble), dim3(1), dim3(128), 0, *stream , result, shape::length(hostXShapeInfo), targetIdx);
} else {
auto tadMaxShapeInfo = reinterpret_cast<Nd4jLong *> (extraPointers[10]);
auto tadMaxOffsets = reinterpret_cast<Nd4jLong *> (extraPointers[11]);
int *dimension = reinterpret_cast<int *> (extraPointers[15]);
special = reinterpret_cast<double *>(extraPointers[17]);
int dimensionLength = getDeviceId(extraPointers[18]);
// we call for IMax on specified dimension
execIndexReduceDouble(extraPointers, 0, dx, xShapeInfo, extraParams, special, hostYShapeInfo, dimension, dimensionLength);
DEBUG_KERNEL(stream, opNum);
// at this point, all IMax indexes are gathered, and we execute filler
hipLaunchKernelGGL(( fillDimensionalIsMaxDouble), dim3(blockLimit), dim3(64), funcAttributes[37].sharedSizeBytes, *stream, special, hostYShapeInfo, result, resultShapeInfo, tadMaxShapeInfo, dimension, dimensionLength, tadMaxOffsets );
nd4j::DebugHelper::checkErrorCode(stream, "Legacy IsMax(...) failed");
}
break;
}
default: {
printf("Bad case for transformDouble\n");
break;
}
}
}
} else {
// for Im2Col & Col2Im we enforce higher dimensionality
// TODO: investigate this on high-end gpus
if (opNum == 37 || opNum == 36 || opNum == 71) {
launchDims.x = 512;
launchDims.y = 512;
launchDims.z += 512 * sizeof(double);
} else if (opNum == 70) {
// we'll be using shared memory to speed up reverse
launchDims.z += launchDims.y * sizeof(double);
}
// Histogram op requires additional memory chunk
// FIXME: make this one to use cache
if (opNum == 48) {
int length = shape::length(hostZShapeInfo);
hipMalloc(reinterpret_cast<void **>(&maskedAllocPointer), length * launchDims.x * sizeof(double));
}
if (opNum == 71) {
launchDims.z += 512 * sizeof(double);
}
functions::transform::Transform<double>::executeTransformShaped(launchDims, stream, opNum, dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), maskedAllocPointer, reductionPointer, devTadShapeInfo, devTadOffsets);
// we need guaranteed sync here, due to temp memory release
if (opNum == 48)
nd4j::DebugHelper::checkErrorCode(stream, "execTransformShaped(...) failed");
// release Histogram memory
if (opNum == 48) {
hipFree(reinterpret_cast<void *>(maskedAllocPointer));
}
}
DEBUG_KERNEL(stream, opNum);
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
double *dx,
Nd4jLong *xShapeInfo,
double *result,
Nd4jLong *resultShapeInfo,
double *extraParams,
Nd4jLong *xIndexes,
Nd4jLong *resultIndexes) {
//
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
*/
float NativeOps::execIndexReduceScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
float *x,
Nd4jLong *xShapeInfo,
float *extraParams){
if (nd4j::Environment::getInstance()->isDebug())
printf("F1 opNum:[%i]\n", opNum);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostYShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[7]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
float *resultPointer = reinterpret_cast<float *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[13], 1, sizeof(float), 4);
if (nd4j::Environment::getInstance()->isDebugAndVerbose() && launchDims.x == 1)
printf("AF1 opNum:[%i]\n", opNum);
functions::indexreduce::IndexReduce<float>::executeIndexReduceScalar(launchDims, stream, opNum,
x,
xShapeInfo, shape::rank(hostXShapeInfo),
extraParams,
resultPointer,
nullptr, 0,
nullptr,
1,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
nd4j::DebugHelper::checkErrorCode(stream, "execIndexReduceScalarFloat(...) failed");
float result = resultPointer[0];
return result;
}
float NativeOps::execIndexReduceScalarHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
Nd4jLong *xShapeInfo,
float16 *extraParams){
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H1 opNum:[%i]\n", opNum);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
float16 *resultPointer = reinterpret_cast<float16 *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[13], 1, sizeof(float16), 8);
if (nd4j::Environment::getInstance()->isDebugAndVerbose() && launchDims.x == 1)
printf("AH1 opNum:[%i]\n", opNum);
functions::indexreduce::IndexReduce<float16>::executeIndexReduceScalar(launchDims, stream, opNum,
x,
xShapeInfo, shape::rank(hostXShapeInfo),
extraParams,
resultPointer,
nullptr, 0,
nullptr,
1,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
nd4j::DebugHelper::checkErrorCode(stream, "execIndexReduceScalarHalf(...) failed");
float result = (float) resultPointer[0];
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execIndexReduceFloat(
Nd4jPointer *extraPointers,
int opNum,
float *x,
Nd4jLong *xShapeInfo,
float *extraParams,
float *result,
Nd4jLong *resultShapeInfo,
int *dimension,
int dimensionLength){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F2 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[13], dimensionLength, sizeof(float), 4);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF2 opNum:[%i]\n", opNum);
functions::indexreduce::IndexReduce<float>::executeIndexReduce(launchDims, stream, opNum,
x,
xShapeInfo, shape::rank(hostXShapeInfo),
extraParams,
result,
resultShapeInfo, shape::rank(hostZShapeInfo),
dimension,
dimensionLength,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
}
void NativeOps::execIndexReduceHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
Nd4jLong *xShapeInfo,
float16 *extraParams,
float16 *result,
Nd4jLong *resultShapeInfo,
int *dimension,
int dimensionLength){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H2 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[13], dimensionLength, sizeof(float16), 8);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AH2 opNum:[%i]\n", opNum);
functions::indexreduce::IndexReduce<float16>::executeIndexReduce(launchDims, stream, opNum,
x,
xShapeInfo, shape::rank(hostXShapeInfo),
extraParams,
result,
resultShapeInfo, shape::rank(hostZShapeInfo),
dimension,
dimensionLength,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execBroadcastFloat(
Nd4jPointer *extraPointers,
int opNum,
float *x,
Nd4jLong *xShapeInfo,
float *y,
Nd4jLong *yShapeInfo,
float *result,
Nd4jLong *resultShapeInfo,
int *dimension, int dimensionLength){
/*
hipEvent_t start;
hipEventCreateWithFlags(&start, hipEventDisableTiming);
timespec tsX;
timespec tsY;
clock_gettime(CLOCK_REALTIME, &tsX);
*/
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostYShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[7]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
auto deviceTADShapeInfoZ = reinterpret_cast<Nd4jLong *>(extraPointers[12]);
auto deviceTADOffsetsZ = reinterpret_cast<Nd4jLong *>(extraPointers[13]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F3 opNum:[%i]\n", opNum);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[12], 1, sizeof(float), 0);
functions::broadcast::Broadcast<float>::executeBroadcast(launchDims, stream, opNum, x, xShapeInfo, y, yShapeInfo, result, resultShapeInfo, dimension, dimensionLength, deviceTADShapeInfo, deviceTADOffsets, deviceTADShapeInfoZ, deviceTADOffsetsZ);
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::execBroadcastHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
Nd4jLong *xShapeInfo,
float16 *y,
Nd4jLong *yShapeInfo,
float16 *result,
Nd4jLong *resultShapeInfo,
int *dimension, int dimensionLength){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostYShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[7]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
auto deviceTADShapeInfoZ = reinterpret_cast<Nd4jLong *>(extraPointers[12]);
auto deviceTADOffsetsZ = reinterpret_cast<Nd4jLong *>(extraPointers[13]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H3 opNum:[%i]\n", opNum);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[12], 1, sizeof(float16), 0);
functions::broadcast::Broadcast<float16>::executeBroadcast(launchDims, stream, opNum, x, xShapeInfo, y, yShapeInfo, result, resultShapeInfo, dimension, dimensionLength, deviceTADShapeInfo, deviceTADOffsets, deviceTADShapeInfoZ, deviceTADOffsetsZ);
}
/**
*
* @param opNum
* @param dx
* @param xStride
* @param y
* @param yStride
* @param result
* @param resultStride
* @param extraParams
* @param n
*/
void NativeOps::execPairwiseTransformFloat(
Nd4jPointer *extraPointers,
int opNum,
float *dx,
Nd4jLong xStride,
float *y,
Nd4jLong yStride,
float *result,
Nd4jLong resultStride,
float *extraParams, Nd4jLong n){
dim3 launchDims(512, 512, 2048);
functions::pairwise_transforms::PairWiseTransform<float>::execudaCudaStrided(launchDims, extraPointers, opNum, dx, xStride, y, yStride, result, resultStride, extraParams, n);
}
void NativeOps::execPairwiseTransformHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *dx,
Nd4jLong xStride,
float16 *y,
Nd4jLong yStride,
float16 *result,
Nd4jLong resultStride,
float16 *extraParams, Nd4jLong n){
dim3 launchDims(512, 512, 2048);
functions::pairwise_transforms::PairWiseTransform<float16>::execudaCudaStrided(launchDims, extraPointers, opNum, dx, xStride, y, yStride, result, resultStride, extraParams, n);
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
* @param xIndexes
* @param yIndexes
* @param resultIndexes
*/
void NativeOps::execPairwiseTransformFloat(
Nd4jPointer *extraPointers,
int opNum,
float *dx,
Nd4jLong *xShapeInfo,
float *y,
Nd4jLong *yShapeInfo,
float *result,
Nd4jLong *resultShapeInfo,
float *extraParams,
Nd4jLong *xIndexes,
Nd4jLong *yIndexes,
Nd4jLong *resultIndexes){
///
}
void NativeOps::execPairwiseTransformHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *dx,
Nd4jLong *xShapeInfo,
float16 *y,
Nd4jLong *yShapeInfo,
float16 *result,
Nd4jLong *resultShapeInfo,
float16 *extraParams,
Nd4jLong *xIndexes,
Nd4jLong *yIndexes,
Nd4jLong *resultIndexes){
///
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execPairwiseTransformFloat(
Nd4jPointer *extraPointers,
int opNum,
float *dx,
Nd4jLong *xShapeInfo,
float *y,
Nd4jLong *yShapeInfo,
float *result,
Nd4jLong *resultShapeInfo,
float *extraParams){
dim3 launchDims(512, 512, 2048);
functions::pairwise_transforms::PairWiseTransform<float>::execudaCudaShaped(launchDims, extraPointers, opNum, dx, xShapeInfo, y, yShapeInfo, result, resultShapeInfo, extraParams);
}
void NativeOps::execPairwiseTransformHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *dx,
Nd4jLong *xShapeInfo,
float16 *y,
Nd4jLong *yShapeInfo,
float16 *result,
Nd4jLong *resultShapeInfo,
float16 *extraParams){
dim3 launchDims(512, 512, 2048);
functions::pairwise_transforms::PairWiseTransform<float16>::execudaCudaShaped(launchDims, extraPointers, opNum, dx, xShapeInfo, y, yShapeInfo, result, resultShapeInfo, extraParams);
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduceFloat(
Nd4jPointer *extraPointers,
int opNum,
float *x,
Nd4jLong *xShapeInfo,
float *extraParams,
float *result,
Nd4jLong *resultShapeInfo) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F7 opNum:[%i]\n", opNum);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[8], 1, sizeof(float), 1);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF7 opNum:[%i]\n", opNum);
if (opNum == 19) {
execReduceFloat(extraPointers, 3, x, xShapeInfo, extraParams, result, resultShapeInfo);
}
// this macro builds a bunch of IF/ELSE selectors for the kernel launch
//DISPATCH_SIMPLE(reduceScalarSimple, float, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, nullptr,1 , reductionPointer, deviceTADShapeInfo), OPS_A(REDUCE_OPS))
functions::reduce::ReduceFunction<float>::execReduceScalar(launchDims, stream, opNum, x, xShapeInfo, extraParams, result, resultShapeInfo, nullptr,1 , reductionPointer, deviceTADShapeInfo);
nd4j::DebugHelper::checkErrorCode(stream, "execReduceFloat(...) failed");
}
void NativeOps::execReduceHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
Nd4jLong *xShapeInfo,
float16 *extraParams,
float16 *result,
Nd4jLong *resultShapeInfo) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H7 opNum:[%i]\n", opNum);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[8], 1, sizeof(float16), 1);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AH7 opNum:[%i]\n", opNum);
if (opNum == 19) {
execReduceHalf(extraPointers, 3, x, xShapeInfo, extraParams, result, resultShapeInfo);
}
// this macro builds a bunch of IF/ELSE selectors for the kernel launch
//DISPATCH_SIMPLE(reduceScalarSimple, float16, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, nullptr,1 , reductionPointer, deviceTADShapeInfo), OPS_A(REDUCE_OPS))
functions::reduce::ReduceFunction<float16>::execReduceScalar(launchDims, stream, opNum, x, xShapeInfo, extraParams, result, resultShapeInfo, nullptr,1 , reductionPointer, deviceTADShapeInfo);
nd4j::DebugHelper::checkErrorCode(stream, "execReduceHalf(...) failed");
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduceFloat(
Nd4jPointer *extraPointers,
int opNum,
float *x,
Nd4jLong *xShapeInfo,
float *extraParams,
float *result,
Nd4jLong *resultShapeInfo,
int *dimension,int dimensionLength){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F8 opNum:[%i]\n", opNum);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
// dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[8], dimensionLength, sizeof(float), 1);
if (opNum == 19) {
execReduceFloat(extraPointers, 3, x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength);
}
if (dimensionLength == 1) {
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[32], dimensionLength, sizeof(double), 2);
functions::reduce::ReduceFunction<float>::execReduceXD(launchDims, stream, opNum, 1, x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
} else if (shape::rank(hostTADShapeInfo) <= 3) {
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[33], dimensionLength, sizeof(double), 2);
functions::reduce::ReduceFunction<float>::execReduceXD(launchDims, stream, opNum, shape::rank(hostTADShapeInfo), x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
} else {
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[22], dimensionLength, sizeof(double), 2);
functions::reduce::ReduceFunction<float>::execReduceXD(launchDims, stream, opNum, shape::rank(hostTADShapeInfo), x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
}
}
void NativeOps::execReduceHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
Nd4jLong *xShapeInfo,
float16 *extraParams,
float16 *result,
Nd4jLong *resultShapeInfo,
int *dimension,int dimensionLength){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H8 opNum:[%i]\n", opNum);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[8], dimensionLength, sizeof(float16), 1);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AH8 opNum:[%i]\n", opNum);
if (opNum == 19) {
execReduceHalf(extraPointers, 3, x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength);
}
if (dimensionLength == 1) {
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[32], dimensionLength, sizeof(double), 2);
functions::reduce::ReduceFunction<float16>::execReduceXD(launchDims, stream, opNum, 1, x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
} else if (shape::rank(hostTADShapeInfo) <= 3) {
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[33], dimensionLength, sizeof(double), 2);
functions::reduce::ReduceFunction<float16>::execReduceXD(launchDims, stream, opNum, shape::rank(hostTADShapeInfo), x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
} else {
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[22], dimensionLength, sizeof(double), 2);
functions::reduce::ReduceFunction<float16>::execReduceXD(launchDims, stream, opNum, shape::rank(hostTADShapeInfo), x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
}
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @return
*/
float NativeOps::execReduceScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
float *x,
Nd4jLong *xShapeInfo,
float *extraParams){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F9 opNum:[%i]\n", opNum);
float *resultPointer = reinterpret_cast<float *>(extraPointers[5]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 8, funcAttributes[8]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF9 opNum:[%i]\n", opNum);
// for the LogExpSum op we need to know the max value first, and store it
if (opNum == 19) {
float tmp = execReduceScalarFloat(extraPointers, 3, x, xShapeInfo, extraParams);
extraParams = resultPointer;
}
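// (this is the stable formulation log sum_i exp(x_i) = m + log sum_i exp(x_i - m),
// with m = max_i x_i: the max just computed lands in resultPointer and is handed to
// the follow-up kernel through extraParams)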
// this macro builds a bunch of IF/ELSE selectors for the kernel launch
//DISPATCH_SIMPLE(reduceScalarSimple, float, PARAMS(x, xShapeInfo, extraParams, resultPointer, nullptr, nullptr,1 , reductionPointer, deviceTADShapeInfo), OPS_A(REDUCE_OPS))
functions::reduce::ReduceFunction<float>::execReduceScalar(launchDims, stream, opNum, x, xShapeInfo, extraParams, resultPointer, nullptr, nullptr,1 , reductionPointer, deviceTADShapeInfo);
// blocking this one
nd4j::DebugHelper::checkErrorCode(stream, "execReduceScalarFloat(...) failed");
float result = resultPointer[0];
return result;
}
float NativeOps::execReduceScalarHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
Nd4jLong *xShapeInfo,
float16 *extraParams){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H9 opNum:[%i]\n", opNum);
float16 *resultPointer = reinterpret_cast<float16 *>(extraPointers[5]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 2, funcAttributes[8]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AH9 opNum:[%i]\n", opNum);
// for the LogExpSum op we need to know the max value first, and store it
if (opNum == 19) {
float tmp = execReduceScalarHalf(extraPointers, 3, x, xShapeInfo, extraParams);
extraParams = resultPointer;
}
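// same stable max-first trick as in execReduceScalarFloat above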
// this macro builds a bunch of IF/ELSE selectors for the kernel launch
//DISPATCH_SIMPLE(reduceScalarSimple, float16, PARAMS(x, xShapeInfo, extraParams, resultPointer, nullptr, nullptr,1 , reductionPointer, deviceTADShapeInfo), OPS_A(REDUCE_OPS))
functions::reduce::ReduceFunction<float16>::execReduceScalar(launchDims, stream, opNum, x, xShapeInfo, extraParams, resultPointer, nullptr, nullptr,1 , reductionPointer, deviceTADShapeInfo);
// blocking call
nd4j::DebugHelper::checkErrorCode(stream, "execReduceScalarHalf(...) failed");
float result = (float) resultPointer[0];
return result;
}
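// For orientation: the extraPointers slots as actually dereferenced by the wrappers in
// this file (reconstructed from usage here, so treat it as descriptive, not normative):
// [0] host X shape info, [1] hipStream_t, [2] device id,
// [3] allocation pointer, [4] reduction scratch buffer,
// [5] scalar result buffer (read back on host after sync),
// [6] special device buffer (special ops), [7] host Y shape info, [8] host Z shape info,
// [9] host TAD shape info, [10] device TAD shape info, [11] device TAD offsets,
// [12]/[13] device TAD shape info/offsets for the second operand (Y or Z),
// [15] dimension buffer, [17] IsMax special buffer, [18] dimension length (boxed int)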
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduce3Float(
Nd4jPointer *extraPointers,
int opNum,
float *x,
Nd4jLong *xShapeInfo,
float *extraParams,
float *y,
Nd4jLong *yShapeInfo,
float *result,
Nd4jLong *resultShapeInfo){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
auto yDeviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[12]);
auto yDeviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[13]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F10 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[7]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF10 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( reduce3ScalarFloat), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
x,
xShapeInfo,
y,
yShapeInfo,
extraParams,
result,
resultShapeInfo,
nullptr,
1,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets);
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::execReduce3Half(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
Nd4jLong *xShapeInfo,
float16 *extraParams,
float16 *y,
Nd4jLong *yShapeInfo,
float16 *result,
Nd4jLong *resultShapeInfo){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
auto yDeviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[12]);
auto yDeviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[13]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H10 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 8, funcAttributes[7]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AH10 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( reduce3ScalarHalf), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
x,
xShapeInfo,
y,
yShapeInfo,
extraParams,
result,
resultShapeInfo,
nullptr,
1,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets);
DEBUG_KERNEL(stream, opNum);
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
*/
float NativeOps::execReduce3ScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
float *x,
Nd4jLong *xShapeInfo,
float *extraParams,
float *y,
Nd4jLong *yShapeInfo) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
auto yDeviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[12]);
auto yDeviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[13]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F11 opNum:[%i]\n", opNum);
float *resultPointer = reinterpret_cast<float *>(extraPointers[5]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 32, funcAttributes[7]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF11 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( reduce3ScalarFloat), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
x,
xShapeInfo,
y,
yShapeInfo,
extraParams,
resultPointer,
nullptr,
nullptr,
1,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets);
// blocking call
nd4j::DebugHelper::checkErrorCode(stream, "execReduce3ScalarFloat(...) failed");
float result = resultPointer[0];
return result;
}
float NativeOps::execReduce3ScalarHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
Nd4jLong *xShapeInfo,
float16 *extraParams,
float16 *y,
Nd4jLong *yShapeInfo) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
auto yDeviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[12]);
auto yDeviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[13]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H11 opNum:[%i]\n", opNum);
float16 *resultPointer = reinterpret_cast<float16 *>(extraPointers[5]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[7]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AH11 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( reduce3ScalarHalf), dim3(launchDims.x),dim3(launchDims.y),launchDims.z + 2048, *stream,
opNum,
x,
xShapeInfo,
y,
yShapeInfo,
extraParams,
resultPointer,
nullptr,
nullptr,
1,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets);
// blocking call
nd4j::DebugHelper::checkErrorCode(stream, "execReduce3ScalarHalf(...) failed");
float result = (float) resultPointer[0];
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execReduce3Float(
Nd4jPointer *extraPointers,
int opNum,
float *x,
Nd4jLong *xShapeInfo,
float *extraParams,
float *y,
Nd4jLong *yShapeInfo,
float *result,
Nd4jLong *resultShapeInfo,
int *dimension,
int dimensionLength){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
auto yDeviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[12]);
auto yDeviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[13]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F12 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[7]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF12 opNum:[%i]\n", opNum);
if (shape::isScalar(hostZShapeInfo) || dimension == nullptr) {
hipLaunchKernelGGL(( reduce3ScalarFloat), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream,
opNum,
x,
xShapeInfo,
y,
yShapeInfo,
extraParams,
result,
resultShapeInfo,
dimension,
dimensionLength,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets);
} else {
hipLaunchKernelGGL(( reduce3Float), dim3(1), dim3(launchDims.y), launchDims.z, *stream,
opNum,
x,
xShapeInfo,
y,
yShapeInfo,
extraParams,
result,
resultShapeInfo,
dimension,
dimensionLength,
1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets);
}
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::execReduce3Half(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
Nd4jLong *xShapeInfo,
float16 *extraParams,
float16 *y,
Nd4jLong *yShapeInfo,
float16 *result,
Nd4jLong *resultShapeInfo,
int *dimension,
int dimensionLength){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
auto yDeviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[12]);
auto yDeviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[13]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H12 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 8, funcAttributes[7]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AH12 opNum:[%i]\n", opNum);
if (shape::isScalar(hostZShapeInfo) || dimension == nullptr) {
hipLaunchKernelGGL(( reduce3ScalarHalf), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream,
opNum,
x,
xShapeInfo,
y,
yShapeInfo,
extraParams,
result,
resultShapeInfo,
dimension,
dimensionLength,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets);
} else {
hipLaunchKernelGGL(( reduce3Half), dim3(1), dim3(launchDims.y), launchDims.z, *stream,
opNum,
x,
xShapeInfo,
y,
yShapeInfo,
extraParams,
result,
resultShapeInfo,
dimension,
dimensionLength,
1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets);
}
DEBUG_KERNEL(stream, opNum);
}
/**
*
* @param opNum
* @param x
* @param xStride
* @param result
* @param resultStride
* @param scalar
* @param extraParams
* @param n
*/
void NativeOps::execScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
float *x,
Nd4jLong xStride,
float *result,
Nd4jLong resultStride,
float scalar,
float *extraParams,
Nd4jLong n){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[6]);
// this macro builds a bunch of IF/ELSE selectors for the kernel launch
functions::scalar::ScalarTransform<float>::executeCudaStrided(launchDims, extraPointers, opNum, x, xStride, result, resultStride, scalar, extraParams, n);
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::execScalarHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
Nd4jLong xStride,
float16 *result,
Nd4jLong resultStride,
float scalar,
float16 *extraParams,
Nd4jLong n){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[6]);
// this macro builds a bunch of IF/ELSE selectors for the kernel launch
//DISPATCH_SIMPLE(scalarSimpleStrided, float16, PARAMS(n, scalar, x, xStride, extraParams, result, resultStride, allocPointer), OPS_A(SCALAR_OPS))
float16 sc = (float16) scalar;
functions::scalar::ScalarTransform<float16>::executeCudaStrided(launchDims, extraPointers, opNum, x, xStride, result, resultStride, sc, extraParams, n);
DEBUG_KERNEL(stream, opNum);
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param scalar
* @param extraParams
* @param n
*/
void NativeOps::execScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
float *x,
Nd4jLong *xShapeInfo,
float *result,
Nd4jLong *resultShapeInfo,
float scalar,
float *extraParams){
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
Nd4jLong n = shape::length(hostXShapeInfo);
// if (nd4j::Environment::getInstance()->isDebugAndVerbose())
// printf("F14 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[5], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[5]);
//if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
// printf("AF14 opNum:[%i], xLength:[%i]\n", opNum, shape::length(hostXShapeInfo));
// this macro builds a bunch of IF/ELSE selectors for the kernel launch
//DISPATCH_SIMPLE(scalarSimpleShaped, float, PARAMS(scalar, x, xShapeInfo, extraParams, result, resultShapeInfo, allocPointer), OPS_A(SCALAR_OPS))
functions::scalar::ScalarTransform<float>::executeCudaShaped(launchDims, extraPointers, opNum, x, xShapeInfo, result, resultShapeInfo, scalar, extraParams);
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::execScalarHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
Nd4jLong *xShapeInfo,
float16 *result,
Nd4jLong *resultShapeInfo,
float scalarF,
float16 *extraParams){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
auto n = shape::length(hostXShapeInfo);
//if (nd4j::Environment::getInstance()->isDebugAndVerbose())
// printf("H14 opNum:[%i]\n", opNum);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[5]);
float16 scalar = (float16) scalarF;
//if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
// printf("AH14 opNum:[%i], xLength:[%i]\n", opNum, shape::length(hostXShapeInfo));
// this macro builds a bunch of IF/ELSE selectors for the kernel launch
//DISPATCH_SIMPLE(scalarSimpleShaped, float16, PARAMS(scalar, x, xShapeInfo, extraParams, result, resultShapeInfo, allocPointer), OPS_A(SCALAR_OPS))
functions::scalar::ScalarTransform<float16>::executeCudaShaped(launchDims, extraPointers, opNum, x, xShapeInfo, result, resultShapeInfo, scalar, extraParams);
DEBUG_KERNEL(stream, opNum);
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param scalar
* @param extraParams
* @param n
* @param xIndexes
* @param resultIndexes
*/
void NativeOps::execScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
float *x,
Nd4jLong *xShapeInfo,
float *result,
Nd4jLong *resultShapeInfo,
float scalar,
float *extraParams,
Nd4jLong *xIndexes,
Nd4jLong *resultIndexes){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto n = shape::length(hostXShapeInfo);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F15 opNum:[%i]\n", opNum);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[4]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF15 opNum:[%i]\n", opNum);
DEBUG_KERNEL(stream, opNum);
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
*/
float NativeOps::execSummaryStatsScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
float *x,
Nd4jLong *xShapeInfo,
float *extraParams,bool biasCorrected){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
float *resultPointer = reinterpret_cast<float *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], 1, sizeof(float), 8);
// we limit grid size for SummaryStats calls
launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x);
return functions::summarystats::SummaryStatsReduce<float>::execSummaryStatsReduceScalar(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, biasCorrected);
}
float NativeOps::execSummaryStatsScalarHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
Nd4jLong *xShapeInfo,
float16 *extraParams,bool biasCorrected){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
float16 *resultPointer = reinterpret_cast<float16 *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
Nd4jLong *deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], 1, sizeof(float16), 8);
launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x);
return (float) functions::summarystats::SummaryStatsReduce<float16>::execSummaryStatsReduceScalar(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, biasCorrected);
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execSummaryStatsFloat(
Nd4jPointer *extraPointers,
int opNum,
float *x,
Nd4jLong *xShapeInfo,
float *extraParams,
float *result,
Nd4jLong *resultShapeInfo,bool biasCorrected){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], 1, sizeof(float), 8);
// limiting number of blocks in grid, to match buffer memory size
launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x);
functions::summarystats::SummaryStatsReduce<float>::execSummaryStatsReduce(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, result, resultShapeInfo, biasCorrected);
}
void NativeOps::execSummaryStatsHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
Nd4jLong *xShapeInfo,
float16 *extraParams,
float16 *result,
Nd4jLong *resultShapeInfo,bool biasCorrected){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], 1, sizeof(float16), 8);
// as everywhere else, we limit maximal number of blocks for SummaryStats calls
launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x);
functions::summarystats::SummaryStatsReduce<float16>::execSummaryStatsReduce(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, result, resultShapeInfo, biasCorrected);
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execSummaryStatsFloat(
Nd4jPointer *extraPointers,
int opNum,
float *x,
Nd4jLong *xShapeInfo,
float *extraParams,
float *result,
Nd4jLong *resultShapeInfo,
int *dimension,
int dimensionLength,bool biasCorrected){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], dimensionLength, sizeof(float), 8);
// as everywhere else, we limit maximal number of blocks for SummaryStats calls
launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x);
functions::summarystats::SummaryStatsReduce<float>::execSummaryStatsReduce(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, biasCorrected);
}
void NativeOps::execSummaryStatsHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
Nd4jLong *xShapeInfo,
float16 *extraParams,
float16 *result,
Nd4jLong *resultShapeInfo,
int *dimension,
int dimensionLength,
bool biasCorrected) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], dimensionLength, sizeof(float16), 8);
// as everywhere else, we limit maximal number of blocks for SummaryStats calls
launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x);
functions::summarystats::SummaryStatsReduce<float16>::execSummaryStatsReduce(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, biasCorrected);
}
/**
*
* @param opNum
* @param dx
* @param xStride
* @param result
* @param resultStride
* @param extraParams
* @param n
*/
void NativeOps::execTransformFloat(
Nd4jPointer *extraPointers,
int opNum,
float *dx,
Nd4jLong xStride,
float *z,
Nd4jLong zStride,
float *extraParams,
Nd4jLong n) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F19 opNum:[%i]\n", opNum);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[2]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF19 opNum:[%i], xLength: [%i]\n", opNum, shape::length(hostXShapeInfo));
functions::transform::Transform<float>::executeTransformStrided(launchDims, stream, opNum, n, dx, xStride, extraParams, z, zStride, allocPointer, reductionPointer);
}
void NativeOps::execTransformHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *dx,
Nd4jLong xStride,
float16 *z,
Nd4jLong zStride,
float16 *extraParams,
Nd4jLong n) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H19 opNum:[%i]\n", opNum);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[2]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AH19 opNum:[%i], xLength: [%i]\n", opNum, shape::length(hostXShapeInfo));
functions::transform::Transform<float16>::executeTransformStrided(launchDims, stream, opNum, n, dx, xStride, extraParams, z, zStride, allocPointer, reductionPointer);
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execTransformFloat(Nd4jPointer *extraPointers,int opNum,
float *dx,
Nd4jLong *xShapeInfo,
float *result,
Nd4jLong *resultShapeInfo,
float *extraParams) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostYShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[7]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F20 opNum:[%i]\n", opNum);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
// special pointer for special buffer for special ops
float *specialPointer = reinterpret_cast<float *>(extraPointers[6]);
int *dimension = reinterpret_cast<int *>(specialPointer);
int *maxDimension = dimension + 1;
auto maxShapeBuffer = reinterpret_cast<Nd4jLong *>(maxDimension + 1);
float * special = reinterpret_cast<float *> (maxShapeBuffer + (MAX_RANK * 2 + 4));
int *maskedAllocPointer = allocPointer;
auto devTadShapeInfo = reinterpret_cast<Nd4jLong *> (extraPointers[10]);
Nd4jLong *devTadOffsets = reinterpret_cast<Nd4jLong *> (extraPointers[11]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[1]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF20 opNum:[%i]\n", opNum);
// simple trick to work around reductions into scalar
// that's special ops: SoftMax, SoftMaxDerivative, LogSoftMax, IsMax
if (opNum >= 38 && opNum <= 41) {
if (shape::isVector(hostXShapeInfo) && opNum != 41) {
// if that's vector, we just go directly to op in 1 block
int length = shape::length(hostXShapeInfo);
int block = nd4j::math::nd4j_min<int>(length, 256);
launchDims.x = 1;
launchDims.y = block;
launchDims.z += (block * sizeof(float) * 4);
functions::transform::Transform<float>::executeTransformShaped(launchDims, stream, opNum, dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), allocPointer, reductionPointer, devTadShapeInfo, devTadOffsets);
} else {
// going for blockwise specials
auto shape = shape::shapeOf(hostXShapeInfo);
switch (opNum) {
case 40: // LogSoftMax
case 39: // SoftMax Derivative
case 38: {// softmax
Nd4jPointer tempPointers[16];
tempPointers[0] = extraPointers[0];
tempPointers[1] = extraPointers[1];
tempPointers[2] = extraPointers[2];
tempPointers[3] = extraPointers[3];
tempPointers[4] = extraPointers[4];
tempPointers[5] = extraPointers[5];
tempPointers[6] = extraPointers[6];
tempPointers[7] = extraPointers[7];
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
tempPointers[12] = extraPointers[12];
tempPointers[13] = extraPointers[13];
tempPointers[14] = extraPointers[14];
tempPointers[15] = extraPointers[15];
Nd4jLong maxShape[2] = {shape::shapeOf(hostXShapeInfo)[0], 1};
auto hostMaxShapeBuffer = shape::shapeBuffer(2, maxShape);
tempPointers[7] = (Nd4jPointer) hostMaxShapeBuffer;
tempPointers[8] = (Nd4jPointer) hostMaxShapeBuffer;
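// What follows builds row-wise softmax out of the generic primitives,
// i.e. softmax(x) = exp(x - max(x)) / sum(exp(x - max(x))):
// reduce 3 (max) -> broadcast 1 (subtract) -> transform 3 (exp)
// -> reduce 1 (sum) -> broadcast 3 (divide),
// then transform 5 (log) for LogSoftMax or transform 42 for SoftMaxDerivative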
hipLaunchKernelGGL(( prepareShapeBuffer) , dim3(1), dim3(1), 128, *stream , dimension, maxDimension, maxShapeBuffer, shape[0]);
DEBUG_KERNEL(stream, opNum);
//shape::printShapeInfo(maxShapeBuffer);
tempPointers[9] = extraPointers[12];
tempPointers[10] = extraPointers[13];
tempPointers[11] = extraPointers[14];
// max 3
execReduceFloat(tempPointers, 3, dx, xShapeInfo, extraParams, special,
maxShapeBuffer, maxDimension, 1);
DEBUG_KERNEL(stream, opNum);
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
tempPointers[12] = extraPointers[10];
tempPointers[13] = extraPointers[11];
// sub 1
execBroadcastFloat(tempPointers, 1, dx, xShapeInfo, special,
maxShapeBuffer, result, resultShapeInfo, dimension, 1);
DEBUG_KERNEL(stream, opNum);
// exp 3
execTransformFloat(extraPointers, 3, result, resultShapeInfo, result, resultShapeInfo, extraParams);
DEBUG_KERNEL(stream, opNum);
tempPointers[8] = tempPointers[7];
tempPointers[9] = extraPointers[12];
tempPointers[10] = extraPointers[13];
tempPointers[11] = extraPointers[14];
//sum 1
execReduceFloat(tempPointers, 1, result, resultShapeInfo, extraParams, special,
maxShapeBuffer, maxDimension, 1);
DEBUG_KERNEL(stream, opNum);
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
tempPointers[12] = extraPointers[10];
tempPointers[13] = extraPointers[11];
// divide 3
execBroadcastFloat(tempPointers, 3, result, resultShapeInfo, special,
maxShapeBuffer, result, resultShapeInfo, dimension, 1);
DEBUG_KERNEL(stream, opNum);
// finish up: log (op 5) for LogSoftMax, op 42 for SoftMaxDerivative
if (opNum == 40)
execTransformFloat(extraPointers, 5, result, resultShapeInfo, result, resultShapeInfo, extraParams);
else if (opNum == 39)
execTransformFloat(extraPointers, 42, result, resultShapeInfo, result, resultShapeInfo, extraParams);
nd4j::DebugHelper::checkErrorCode(stream, "SoftMaxFloat(...) failed");
delete[] hostMaxShapeBuffer;
break;
}
case 41: {
// IsMax along all dimensions
bool scalarCheat = (extraParams == nullptr);
if (scalarCheat) {
// if that's 1D input - we'll just go for single dim IMax op call + filler
int maxIdx = (int) execIndexReduceScalarFloat(extraPointers, 0, dx, xShapeInfo, extraParams);
int targetIdx = 0;
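// 'c' order: the IMax result is already the linear index we want; 'f' order: scale by
// the last stride, unless the scaled index would run past the end of the buffer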
if (shape::order(hostXShapeInfo) == 'c' || (shape::order(hostXShapeInfo) == 'f' && maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1] >= shape::length(hostXShapeInfo)))
targetIdx = maxIdx;
else
targetIdx = maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1];
hipLaunchKernelGGL(( fillIsMaxFloat), dim3(1), dim3(128), 1536, *stream , result, shape::length(hostXShapeInfo), targetIdx);
nd4j::DebugHelper::checkErrorCode(stream, "Legacy IsMax(...) failed");
} else {
// going for dimension-based IsMax
auto tadMaxShapeInfo = reinterpret_cast<Nd4jLong *> (extraPointers[10]);
auto tadMaxOffsets = reinterpret_cast<Nd4jLong *> (extraPointers[11]);
auto dimension = reinterpret_cast<int *> (extraPointers[15]);
special = reinterpret_cast<float *>(extraPointers[17]);
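// dimensionLength arrives boxed in pointer slot [18]; getDeviceId() is (ab)used here to unbox the int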
int dimensionLength = getDeviceId(extraPointers[18]);
// we call for IMax on specified dimension
execIndexReduceFloat(extraPointers, 0, dx, xShapeInfo, extraParams, special, hostYShapeInfo, dimension, dimensionLength);
DEBUG_KERNEL(stream, opNum);
// at this point, all IMax indexes are gathered, and we execute
hipLaunchKernelGGL(( fillDimensionalIsMaxFloat), dim3(blockLimit), dim3(64), funcAttributes[36].sharedSizeBytes, *stream, special, hostYShapeInfo, result, resultShapeInfo, tadMaxShapeInfo, dimension, dimensionLength, tadMaxOffsets );
nd4j::DebugHelper::checkErrorCode(stream, "Legacy IsMax(...) failed");
}
break;
}
default: {
printf("Bad case for transformFloat\n");
break;
}
}
}
} else {
// we're enforcing larger grids for Col2Im & Im2Col
// TODO: for high-end gpus we might use higher values here
if (opNum == 37 || opNum == 36 || opNum == 71) {
launchDims.x = 512;
launchDims.y = 512;
launchDims.z += 512 * sizeof(float);
} else if (opNum == 70) {
// we'll be using shared memory to speed up reverse
launchDims.z += launchDims.y * sizeof(float);
}
// histogram op requires an additional memory chunk :(
if (opNum == 48) {
int length = shape::length(hostZShapeInfo);
hipMalloc(reinterpret_cast<void **>(&maskedAllocPointer), length * launchDims.x * sizeof(float));
}
if (opNum == 71) {
launchDims.z += 512 * sizeof(float);
}
/*
DISPATCH_SIMPLE(transformShaped, float,
PARAMS(dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo,
shape::rank(hostZShapeInfo), maskedAllocPointer, reductionPointer, devTadShapeInfo, devTadOffsets), OPS_A(TRANSFORM_OPS))
*/
functions::transform::Transform<float>::executeTransformShaped(launchDims, stream, opNum, dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), maskedAllocPointer, reductionPointer, devTadShapeInfo, devTadOffsets);
// we need guaranteed sync here, due to temp memory release
if (opNum == 48)
nd4j::DebugHelper::checkErrorCode(stream, "Legacy HistogramFloat(...) failed");
// release memory chunk
if (opNum == 48) {
hipFree(reinterpret_cast<void *>(maskedAllocPointer));
}
}
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::execTransformHalf(Nd4jPointer *extraPointers,int opNum,
float16 *dx,
Nd4jLong *xShapeInfo,
float16 *result,
Nd4jLong *resultShapeInfo,
float16 *extraParams) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostYShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[7]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H20 opNum:[%i]\n", opNum);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
int *maskedAllocPointer = allocPointer;
float16 *specialPointer = reinterpret_cast<float16 *>(extraPointers[6]);
int *dimension = reinterpret_cast<int *>(specialPointer);
int *maxDimension = dimension + 1;
auto maxShapeBuffer = reinterpret_cast<Nd4jLong *>(maxDimension + 1);
float16 * special = reinterpret_cast<float16 *>(maxShapeBuffer + (MAX_RANK * 2 + 4));
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[1]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AH20 opNum:[%i]\n", opNum);
auto devTadShapeInfo = reinterpret_cast<Nd4jLong *> (extraPointers[10]);
auto devTadOffsets = reinterpret_cast<Nd4jLong *> (extraPointers[11]);
// simple trick to work around reductions into scalar
// SoftMax, SoftMaxDerivative, LogSoftMax, IsMax
if (opNum >= 38 && opNum <= 41) {
if (shape::isVector(hostXShapeInfo) && opNum != 41) {
// if that's vector, we just go directly to op in 1 block
auto length = shape::length(hostXShapeInfo);
auto block = nd4j::math::nd4j_min<Nd4jLong>(length, 256);
launchDims.x = 1;
launchDims.y = block;
launchDims.z += (block * sizeof(float16) * 4);
functions::transform::Transform<float16>::executeTransformShaped(launchDims, stream, opNum, dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), allocPointer, reductionPointer, devTadShapeInfo, devTadOffsets);
} else {
// going for blockwise specials
auto shape = shape::shapeOf(hostXShapeInfo);
switch (opNum) {
case 40: // LogSoftMax
case 39: // SoftMax Derivative
case 38: {// softmax
Nd4jPointer tempPointers[16];
tempPointers[0] = extraPointers[0];
tempPointers[1] = extraPointers[1];
tempPointers[2] = extraPointers[2];
tempPointers[3] = extraPointers[3];
tempPointers[4] = extraPointers[4];
tempPointers[5] = extraPointers[5];
tempPointers[6] = extraPointers[6];
tempPointers[7] = extraPointers[7];
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
tempPointers[12] = extraPointers[12];
tempPointers[13] = extraPointers[13];
tempPointers[14] = extraPointers[14];
tempPointers[15] = extraPointers[15];
Nd4jLong maxShape[2] = {shape::shapeOf(hostXShapeInfo)[0], 1};
auto hostMaxShapeBuffer = shape::shapeBuffer(2, maxShape);
tempPointers[7] = (Nd4jPointer) hostMaxShapeBuffer;
tempPointers[8] = (Nd4jPointer) hostMaxShapeBuffer;
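// same max/sub/exp/sum/divide pipeline as the float path above (see note there)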
// FIXME: fix this
hipLaunchKernelGGL(( prepareShapeBuffer) , dim3(1), dim3(1), 128, *stream , dimension, maxDimension, maxShapeBuffer, shape[0]);
DEBUG_KERNEL(stream, opNum);
//shape::printShapeInfo(maxShapeBuffer);
tempPointers[9] = extraPointers[12];
tempPointers[10] = extraPointers[13];
tempPointers[11] = extraPointers[14];
// max 3
execReduceHalf(tempPointers, 3, dx, xShapeInfo, extraParams, special,
maxShapeBuffer, maxDimension, 1);
DEBUG_KERNEL(stream, opNum);
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
tempPointers[12] = extraPointers[10];
tempPointers[13] = extraPointers[11];
// sub 1
execBroadcastHalf(tempPointers, 1, dx, xShapeInfo, special,
maxShapeBuffer, result, resultShapeInfo, dimension, 1);
DEBUG_KERNEL(stream, opNum);
// exp 3
execTransformHalf(extraPointers, 3, result, resultShapeInfo, result, resultShapeInfo, extraParams);
DEBUG_KERNEL(stream, opNum);
tempPointers[8] = tempPointers[7];
tempPointers[9] = extraPointers[12];
tempPointers[10] = extraPointers[13];
tempPointers[11] = extraPointers[14];
//sum 1
execReduceHalf(tempPointers, 1, result, resultShapeInfo, extraParams, special,
maxShapeBuffer, maxDimension, 1);
DEBUG_KERNEL(stream, opNum);
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
tempPointers[12] = extraPointers[10];
tempPointers[13] = extraPointers[11];
// divide 3
execBroadcastHalf(tempPointers, 3, result, resultShapeInfo, special,
maxShapeBuffer, result, resultShapeInfo, dimension, 1);
if (opNum == 40) {
DEBUG_KERNEL(stream, opNum);
execTransformHalf(tempPointers, 47, result, resultShapeInfo, result, resultShapeInfo, extraParams);
}
DEBUG_KERNEL(stream, opNum);
// finish up: log (op 5) for LogSoftMax, op 42 for SoftMaxDerivative
if (opNum == 40)
execTransformHalf(extraPointers, 5, result, resultShapeInfo, result, resultShapeInfo, extraParams);
else if (opNum == 39)
execTransformHalf(extraPointers, 42, result, resultShapeInfo, result, resultShapeInfo, extraParams);
nd4j::DebugHelper::checkErrorCode(stream, "Legacy SoftMaxHalf(...) failed");
delete[] hostMaxShapeBuffer;
break;
}
case 41: {
// IsMax along all dimensions
bool scalarCheat = (extraParams == nullptr);
if (scalarCheat) {
// 1D input, aka vector
int maxIdx = (int) execIndexReduceScalarHalf(extraPointers, 0, dx, xShapeInfo, extraParams);
int targetIdx = 0;
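// same order/stride adjustment as the float path above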
if (shape::order(hostXShapeInfo) == 'c' || (shape::order(hostXShapeInfo) == 'f' && maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1] >= shape::length(hostXShapeInfo)))
targetIdx = maxIdx;
else
targetIdx = maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1];
hipLaunchKernelGGL(( fillIsMaxHalf), dim3(1), dim3(128), 1536, *stream , result, shape::length(hostXShapeInfo), targetIdx);
} else {
// going for dimension-based IsMax
auto tadMaxShapeInfo = reinterpret_cast<Nd4jLong *> (extraPointers[10]);
auto tadMaxOffsets = reinterpret_cast<Nd4jLong *> (extraPointers[11]);
int *dimension = reinterpret_cast<int *> (extraPointers[15]);
special = reinterpret_cast<float16 *>(extraPointers[17]);
int dimensionLength = getDeviceId(extraPointers[18]);
// we call for IMax on specified dimension
execIndexReduceHalf(extraPointers, 0, dx, xShapeInfo, extraParams, special, hostYShapeInfo, dimension, dimensionLength);
DEBUG_KERNEL(stream, opNum);
// at this point, all IMax indexes are gathered, and we execute
hipLaunchKernelGGL(( fillDimensionalIsMaxHalf), dim3(blockLimit), dim3(64), funcAttributes[36].sharedSizeBytes, *stream, special, hostYShapeInfo, result, resultShapeInfo, tadMaxShapeInfo, dimension, dimensionLength, tadMaxOffsets );
nd4j::DebugHelper::checkErrorCode(stream, "Legacy IsMaxHalf(...) failed");
}
break;
}
default: {
printf("Bad case for transformHalf\n");
break;
}
}
}
} else {
// Im2Col & Col2Im enforced grids
if (opNum == 37 || opNum == 36 || opNum == 71) {
launchDims.x = 512;
launchDims.y = 512;
launchDims.z += 512 * sizeof(float16);
} else if (opNum == 70) {
// we'll be using shared memory to speed up reverse
launchDims.z += launchDims.y * sizeof(float16);
}
// Histogram op requires additional memory chunk
if (opNum == 48) {
int length = shape::length(hostZShapeInfo);
hipMalloc(reinterpret_cast<void **>(&maskedAllocPointer), length * launchDims.x * sizeof(float16));
}
if (opNum == 71) {
launchDims.z += 512 * sizeof(float16);
}
functions::transform::Transform<float16>::executeTransformShaped(launchDims, stream, opNum, dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), maskedAllocPointer, reductionPointer, devTadShapeInfo, devTadOffsets);
// we need guaranteed sync here, due to temp memory release
if (opNum == 48)
nd4j::DebugHelper::checkErrorCode(stream, "Legacy HistogramHalf(...) failed");
// release that histogram memory chunk
if (opNum == 48) {
hipFree(reinterpret_cast<void *>(maskedAllocPointer));
}
}
DEBUG_KERNEL(stream, opNum);
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execTransformFloat(
Nd4jPointer *extraPointers,
int opNum,
float *dx,
Nd4jLong *xShapeInfo,
float *result,
Nd4jLong *resultShapeInfo,
float *extraParams,
Nd4jLong *xIndexes,
Nd4jLong *resultIndexes) {
/// intentionally a no-op: the index-based transform variant is not implemented on this backend
}
void NativeOps::execTransformHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *dx,
Nd4jLong *xShapeInfo,
float16 *result,
Nd4jLong *resultShapeInfo,
float16 *extraParams,
Nd4jLong *xIndexes,
Nd4jLong *resultIndexes) {
/// intentionally a no-op: the index-based transform variant is not implemented on this backend
}
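// flattenKernelGeneric appends `input` into `result` starting at element offset dOffset,
// honoring the requested order. All loops below are grid-stride loops; when element-wise
// strides are usable the copy runs over flat indices, otherwise each index is decomposed
// with ind2sub/ind2subC and resolved through shape::getOffset.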
template <typename T>
__device__ void flattenKernelGeneric(int dOffset,
char order,
T *result,
Nd4jLong *resultShapeInfo,
T *input,
Nd4jLong *inputShapeInfo, int *allocationPointer) {
__shared__ UnifiedSharedMemory *manager;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
manager = new(shmem) UnifiedSharedMemory(reinterpret_cast<int *>(shmem));
manager->init(sizeof(UnifiedSharedMemory), 4, 4, sizeof(shape::TAD), 2);
}
__syncthreads();
Nd4jLong tid = blockIdx.x * blockDim.x + threadIdx.x;
auto zShape = shape::shapeOf(resultShapeInfo);
auto zStride = shape::stride(resultShapeInfo);
auto yShape = shape::shapeOf(inputShapeInfo);
auto yStride = shape::stride(inputShapeInfo);
auto yOrder = shape::order(inputShapeInfo);
auto len = shape::length(inputShapeInfo);
auto resultEWS = shape::elementWiseStride(resultShapeInfo);
auto inputEWS = shape::elementWiseStride(inputShapeInfo);
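// fast path: orders match and both buffers expose a valid element-wise stride,
// so a flat grid-stride copy suffices; anything else falls back to coordinate math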
if (yOrder == order) {
if (resultEWS >= 1 && inputEWS >= 1) {
for (int i = tid; i < len; i+= gridDim.x * blockDim.x) {
result[i * resultEWS + dOffset] = input[i * inputEWS];
}
} else {
auto rank = shape::rank(inputShapeInfo);
Nd4jLong coord[MAX_RANK];
if(order == 'f') {
for(auto i = tid; i < len; i+= gridDim.x * blockDim.x) {
shape::ind2sub(rank,yShape,i,coord);
auto offset = shape::getOffset(0,yShape,yStride,coord,rank);
result[i + dOffset] = input[offset];
}
}
else {
for(auto i = tid; i < len; i+= gridDim.x * blockDim.x) {
shape::ind2subC(rank,yShape,i,coord);
auto offset = shape::getOffset(0,yShape,yStride,coord,rank);
result[i + dOffset] = input[offset];
}
}
}
} else {
int rank = shape::rank(inputShapeInfo);
Nd4jLong coord[MAX_RANK];
if(order == 'f') {
for(int i = tid; i < len; i+= gridDim.x * blockDim.x) {
shape::ind2sub(rank,yShape,i,coord);
auto offset = shape::getOffset(0,yShape,yStride,coord,rank);
result[i+dOffset] = input[offset];
}
}
else {
for(int i = tid; i < len; i+= gridDim.x * blockDim.x) {
shape::ind2subC(rank,yShape,i,coord);
auto offset = shape::getOffset(0,yShape,yStride,coord,rank);
result[i+dOffset] = input[offset];
}
}
}
}
extern "C" __global__ void flattenKernelDouble(int offset,
char order,
double *result,
Nd4jLong *resultShapeInfo,
double *input,
Nd4jLong *inputShapeInfo, int *allocationPointer) {
flattenKernelGeneric<double>(
offset,
order, result,
resultShapeInfo,
input,
inputShapeInfo,
allocationPointer);
}
extern "C" __global__ void flattenKernelFloat(int offset,
char order,
float *result,
Nd4jLong *resultShapeInfo,
float *input,
Nd4jLong *inputShapeInfo, int *allocationPointer) {
flattenKernelGeneric<float>(
offset,
order,
result,
resultShapeInfo,
input,
inputShapeInfo,
allocationPointer);
}
extern "C" __global__ void flattenKernelHalf(int offset,
char order,
float16 *result,
Nd4jLong *resultShapeInfo,
float16 *input,
Nd4jLong *inputShapeInfo, int *allocationPointer) {
flattenKernelGeneric<float16>(
offset,
order,
result,
resultShapeInfo,
input,
inputShapeInfo,
allocationPointer);
}
/**
* Append an input array
* to the end of a flat array
* in a particular order
* @param offset the offset of the array to start at
* @param order the order
* @param result the result array
 * @param resultShapeInfo the shape info for the result array
* @param input the input for the array
* @param inputShapeInfo the shape information for that array
*/
void NativeOps::flattenFloat(
Nd4jPointer *extraPointers,
int offset,
char order,
float *result,
Nd4jLong *resultShapeInfo,
float *input,
Nd4jLong *inputShapeInfo) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostYShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[7]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F22 opNum:[7]\n");
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostYShapeInfo), 2, funcAttributes[30]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF222 opNum:[7]\n");
hipLaunchKernelGGL(( flattenKernelFloat), dim3(launchDims.x),dim3(launchDims.y), launchDims.z, *stream, offset, order, result, resultShapeInfo, input, inputShapeInfo, allocPointer);
DEBUG_KERNEL(stream, -1);
}
void NativeOps::flattenHalf(
Nd4jPointer *extraPointers,
int offset,
char order,
float16 *result,
Nd4jLong *resultShapeInfo,
float16 *input,
Nd4jLong *inputShapeInfo) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostYShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[7]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H22 opNum:[7]\n");
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostYShapeInfo), 2, funcAttributes[30]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AH222 opNum:[7]\n");
hipLaunchKernelGGL(( flattenKernelHalf), dim3(launchDims.x),dim3(launchDims.y), launchDims.z, *stream, offset, order, result, resultShapeInfo, input, inputShapeInfo, allocPointer);
DEBUG_KERNEL(stream, -1);
}
/**
* Append an input array
* to the end of a flat array
* in a particular order
* @param offset the offset of the array to start at
* @param order the order
* @param result the result array
 * @param resultShapeInfo the shape info for the result array
* @param input the input for the array
* @param inputShapeInfo the shape information for that array
*/
void NativeOps::flattenDouble(
Nd4jPointer *extraPointers,
int offset,
char order,
double *result,
Nd4jLong *resultShapeInfo,
double *input,
Nd4jLong *inputShapeInfo) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostYShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[7]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D30 opNum:[7]\n");
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostYShapeInfo), 2, funcAttributes[34]);
hipLaunchKernelGGL(( flattenKernelDouble), dim3(launchDims.x),dim3(launchDims.y), launchDims.z, *stream, offset, order, result, resultShapeInfo, input, inputShapeInfo, allocPointer);
DEBUG_KERNEL(stream, -1);
}
void NativeOps::checkP2P() {
int curDevice = 0;
hipGetDevice(&curDevice);
int devCnt = 0;
hipGetDeviceCount(&devCnt);
    if (curDevice < 0 || curDevice >= devCnt)
curDevice = 0;
bool tempSupport = true;
if (devCnt > 1) {
for (int x = 0; x < devCnt; x++) {
for (int y = 0; y < devCnt; y++) {
if (x == y)
continue;
int canAccess = 0;
hipSetDevice(x);
hipDeviceCanAccessPeer(&canAccess, x , y);
if (!canAccess) {
tempSupport = false;
break;
}
}
}
supportedP2P = tempSupport;
hipSetDevice(curDevice);
} else {
// if we have only 1 device - we say that we support P2P, since all data will be on 1 device
supportedP2P = true;
}
}
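// Standalone sketch of the probe above (hypothetical debugging helper, not
// part of this API): the same pairwise hipDeviceCanAccessPeer() walk can be
// used to print a peer-access matrix:
//
//    int devCnt = 0;
//    hipGetDeviceCount(&devCnt);
//    for (int x = 0; x < devCnt; x++)
//        for (int y = 0; y < devCnt; y++) {
//            if (x == y) continue;
//            int canAccess = 0;
//            hipSetDevice(x);
//            hipDeviceCanAccessPeer(&canAccess, x, y);
//            printf("P2P [%i] -> [%i]: %s\n", x, y, canAccess ? "yes" : "no");
//        }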
void NativeOps::enableP2P(bool enable) {
if (enable == allowedP2P)
return;
int curDevice = 0;
hipGetDevice(&curDevice);
int devCnt = 0;
hipGetDeviceCount(&devCnt);
    if (curDevice < 0 || curDevice >= devCnt)
curDevice = 0;
if (devCnt > 1) {
for (int x = 0; x < devCnt; x++) {
for (int y = 0; y < devCnt; y++) {
if (x == y)
continue;
int canAccess = 0;
hipSetDevice(x);
hipDeviceCanAccessPeer(&canAccess, x , y);
if (canAccess) {
if (enable) {
hipDeviceEnablePeerAccess(y, 0);
} else {
hipDeviceDisablePeerAccess(y);
}
} else {
if (nd4j::Environment::getInstance()->isVerbose()) printf("Peer access [%i] -> [%i] isn't possible\n", x, y);
}
}
}
hipSetDevice(curDevice);
}
allowedP2P = enable;
hipSetDevice(curDevice);
}
bool NativeOps::isP2PAvailable() {
return supportedP2P;
}
void NativeOps::initializeDevicesAndFunctions() {
int devCnt = 0;
hipGetDeviceCount(&devCnt);
deviceProperties = new hipDeviceProp_t[devCnt];
for (int i = 0; i < devCnt; i++) {
hipSetDevice(i);
hipGetDeviceProperties(&deviceProperties[i], i);
hipDeviceSetLimit(hipLimitStackSize, 4096);
}
hipSetDevice(0);
checkP2P();
// enabling p2p gpu access if it's supported
if (supportedP2P && devCnt > 1)
enableP2P(allowedP2P);
//hipFuncGetAttributes(&funcAttributes[0], (void *)transformFloatIndexes);
//void (*transformFloatPointer1)(int opNum, float *dy,int *shapeInfo, int xRank, float *params, float *result,int *resultShapeInfo, int zRank, int *allocationPointer, float *reductionPointer) = transformFloat;
// FIXME
//hipFuncGetAttributes(&funcAttributes[1], transformFloatIndexes);
//void (*transformFloatPointer2)(int opNum, Nd4jLong n, float *dy, int incy, float *params, float *result,int resultStride, int *allocationPointer, float *reductionPointer) = transformFloat;
// FIXME
//hipFuncGetAttributes(&funcAttributes[2], transformFloatIndexes);
//hipFuncGetAttributes(&funcAttributes[3], (void *)functions::summarystats::summaryStatsReduceFloat);
//hipFuncGetAttributes(&funcAttributes[4], (void *)scalarFloatIndexes);
// void (*scalarFloatPointer1)(int opNum, float dx,float *dy, int *shapeInfo, int xRank, float *params, float *result,int *resultShapeInfo, int zRank, int *allocPointer) = scalarFloat;
// hipFuncGetAttributes(&funcAttributes[5], scalarFloatIndexes);
// void (*scalarFloatPointer2)(int opNum, Nd4jLong n,float dx, float *dy, int incy, float *params, float *result,int resultStride, int *allocPointer) = scalarFloat;
// hipFuncGetAttributes(&funcAttributes[6], scalarFloatIndexes);
hipFuncGetAttributes(&funcAttributes[7], reduce3Float);
hipFuncGetAttributes(&funcAttributes[8], reduce3Float);
// printf("reduceFloat regs: [%i], static shmem: [%i]\n", funcAttributes[8].numRegs, funcAttributes[8].sharedSizeBytes);
hipFuncGetAttributes(&funcAttributes[28], reduce3Float); // 1D
// printf("reduceFloat1D regs: [%i], static shmem: [%i]\n", funcAttributes[28].numRegs, funcAttributes[28].sharedSizeBytes);
hipFuncGetAttributes(&funcAttributes[29], reduce3Float); // 6D
// printf("reduceFloat6D regs: [%i], static shmem: [%i]\n", funcAttributes[29].numRegs, funcAttributes[29].sharedSizeBytes);
hipFuncGetAttributes(&funcAttributes[30], flattenKernelFloat);
hipFuncGetAttributes(&funcAttributes[31], concatKernelFloat);
// hipFuncGetAttributes(&funcAttributes[9], pairWiseTransformFloat);
// hipFuncGetAttributes(&funcAttributes[10], pairWiseTransformFloatIndex);
// hipFuncGetAttributes(&funcAttributes[11], pairWiseTransformStridedFloat);
hipFuncGetAttributes(&funcAttributes[12], reduce3Float);
hipFuncGetAttributes(&funcAttributes[13], reduce3Float);
///////////////////////////////////////// Doubles are separate, just in case of...
//hipFuncGetAttributes(&funcAttributes[14], transformDoubleIndexes);
// void (*transformDoublePointer1)(int opNum, double *dy, int *shapeInfo, int xRank, double *params, double *result,int *resultShapeInfo, int zRank, int *allocationPointer, double *reductionPointer) = transformDouble;
// FIXME
//hipFuncGetAttributes(&funcAttributes[15], transformDoubleIndexes);
//void (*transformDoublePointer2)(int opNum, Nd4jLong n, double *dy, int incy, double *params, double *result,int resultStride, int *allocationPointer, double *reductionPointer) = transformDouble;
// FIXME
//hipFuncGetAttributes(&funcAttributes[16], transformDoubleIndexes);
//hipFuncGetAttributes(&funcAttributes[17], functions::summarystats::summaryStatsReduceDouble);
// hipFuncGetAttributes(&funcAttributes[18], scalarDoubleIndexes);
//void (*scalarDoublePointer1)(int opNum, double dx,double *dy, int *shapeInfo, int xRank, double *params, double *result,int *resultShapeInfo, int zRank, int *allocPointer) = scalarDouble;
// hipFuncGetAttributes(&funcAttributes[19], scalarDoubleIndexes);
//void (*scalarDoublePointer2)(int opNum, Nd4jLong n,double dx, double *dy, int incy, double *params, double *result,int resultStride, int *allocPointer) = scalarDouble;
// hipFuncGetAttributes(&funcAttributes[20], scalarDoubleIndexes);
hipFuncGetAttributes(&funcAttributes[21], reduce3Double);
hipFuncGetAttributes(&funcAttributes[22], reduce3Float);
// hipFuncGetAttributes(&funcAttributes[23], pairWiseTransformDouble);
// hipFuncGetAttributes(&funcAttributes[24], pairWiseTransformDoubleIndex);
// hipFuncGetAttributes(&funcAttributes[25], pairWiseTransformStridedDouble);
hipFuncGetAttributes(&funcAttributes[26], reduce3Double);
hipFuncGetAttributes(&funcAttributes[27], reduce3Double);
hipFuncGetAttributes(&funcAttributes[32], reduce3Float); // 1D
hipFuncGetAttributes(&funcAttributes[33], reduce3Float); // 6D
hipFuncGetAttributes(&funcAttributes[34], flattenKernelDouble);
hipFuncGetAttributes(&funcAttributes[35], concatKernelDouble);
hipFuncGetAttributes(&funcAttributes[36], fillDimensionalIsMaxFloat);
hipFuncGetAttributes(&funcAttributes[37], fillDimensionalIsMaxDouble);
hipFuncGetAttributes(&funcAttributes[38], concatKernelScalarFloat);
hipFuncGetAttributes(&funcAttributes[39], concatKernelScalarDouble);
hipFuncGetAttributes(&funcAttributes[40], concatKernelVStackFloat);
hipFuncGetAttributes(&funcAttributes[41], concatKernelVStackDouble);
hipFuncGetAttributes(&funcAttributes[42], concatKernelHStackFloat);
hipFuncGetAttributes(&funcAttributes[43], concatKernelHStackDouble);
/////////////////////////
hipFuncGetAttributes(&funcAttributes[44], averagingKernelHalf);
hipFuncGetAttributes(&funcAttributes[45], averagingKernelFloat);
hipFuncGetAttributes(&funcAttributes[46], averagingKernelDouble);
//
//hipFuncGetAttributes(&funcAttributes[47], scalarAlongDimension_0_float);
//hipFuncGetAttributes(&funcAttributes[48], scalarAlongDimension_0_float16);
//hipFuncGetAttributes(&funcAttributes[48], scalarAlongDimension_0_double);
}
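// Note: funcAttributes[] caches per-kernel hipFuncAttributes gathered above;
// the indices are fixed and consumed by the launch-parameter helpers, e.g.
// funcAttributes[30] (flattenKernelFloat) feeds getBasicLaunchParams() in
// flattenFloat(), and funcAttributes[34] (flattenKernelDouble) does the same
// in flattenDouble().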
void NativeOps::initializeFunctions(Nd4jPointer *functions) {
nd4j::BlasHelper::getInstance()->initializeDeviceFunctions(functions);
/*
this->hipblasSgemv = (CublasSgemv)functions[0];
this->hipblasDgemv = (CublasDgemv)functions[1];
this->hipblasHgemm = (CublasHgemm)functions[2];
this->hipblasSgemm = (CublasSgemm)functions[3];
this->hipblasDgemm = (CublasDgemm)functions[4];
this->cublasSgemmEx = (CublasSgemmEx)functions[5];
this->hipblasHgemmBatched = (CublasHgemmBatched)functions[6];
this->hipblasSgemmBatched = (CublasSgemmBatched)functions[7];
this->hipblasDgemmBatched = (CublasDgemmBatched)functions[8];
*/
}
/**
 * This method acquires a memory chunk of the requested size on the host side
*
* @param pointer pointer that'll be used for allocation
* @param memorySize memory size, in bytes
* @param flags optional parameter
*/
Nd4jPointer NativeOps::mallocHost(Nd4jLong memorySize, int flags) {
Nd4jPointer pointer;
// hipHostMallocMapped |hipHostMallocPortable
hipError_t res = hipHostMalloc(reinterpret_cast<void **>(&pointer), memorySize, hipHostMallocDefault);
if (res != 0)
pointer = 0L;
return pointer;
}
/**
 * This method acquires a memory chunk of the requested size on the specified device
 *
 * @param pointer pointer that'll be used for allocation
 * @param memorySize memory size, in bytes
 * @param ptrToDeviceId pointer to deviceId. For CUDA that's just an int, for OpenCL that's a pointer to device_id, etc.
* @param flags optional parameter
*/
Nd4jPointer NativeOps::mallocDevice(Nd4jLong memorySize, Nd4jPointer ptrToDeviceId, int flags) {
Nd4jPointer pointer;
hipError_t res = hipMalloc(reinterpret_cast<void **>(&pointer), memorySize);
if (res != 0)
pointer = 0L;
return pointer;
}
/**
* This method releases previously allocated host memory space
*
* @param pointer pointer that'll be freed
*/
int NativeOps::freeHost(Nd4jPointer pointer) {
hipError_t res = hipHostFree(reinterpret_cast<void *>(pointer));
if (res != 0)
pointer = 0L;
return 1L;
}
/**
* This method releases previously allocated memory space on device
*
* @param pointer pointer that'll be freed
* @param ptrToDeviceId pointer to deviceId.
*/
int NativeOps::freeDevice(Nd4jPointer pointer, Nd4jPointer ptrToDeviceId) {
hipError_t res = hipFree(reinterpret_cast<void *>(pointer));
if (res != 0)
pointer = 0L;
return 1L;
}
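// Usage sketch (hypothetical caller code; `ops` and `deviceId` are
// illustrative names, not part of this file): mallocDevice()/freeDevice()
// pair up as a simple round trip, with 0L signalling allocation failure:
//
//    Nd4jPointer devBuf = ops->mallocDevice(1024 * sizeof(float), deviceId, 0);
//    if (devBuf != 0L) {
//        // ... move data with memcpy()/memcpyAsync() ...
//        ops->freeDevice(devBuf, deviceId);
//    }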
Nd4jPointer NativeOps::createContext() {
return 0L;
}
Nd4jPointer NativeOps::createStream() {
Nd4jPointer nativeStream = (Nd4jPointer) malloc(sizeof(hipStream_t));
CHECK_ALLOC(nativeStream, "Failed to allocate memory for new CUDA stream");
hipError_t result = hipStreamCreate(reinterpret_cast<hipStream_t *>(&nativeStream));
checkCudaErrors(result);
if (result != 0)
throw std::runtime_error("hipStreamCreate(...) failed");
return nativeStream;
}
Nd4jPointer NativeOps::createEvent() {
Nd4jPointer nativeEvent= (Nd4jPointer) malloc(sizeof(hipEvent_t));
CHECK_ALLOC(nativeEvent, "Failed to allocate new CUDA event buffer");
hipError_t result = hipEventCreateWithFlags(reinterpret_cast<hipEvent_t *>(&nativeEvent), hipEventDisableTiming);
checkCudaErrors(result);
if (result != 0)
throw std::runtime_error("hipEventCreateWithFlags(...) failed");
return nativeEvent;
}
int NativeOps::registerEvent(Nd4jPointer event, Nd4jPointer stream) {
hipEvent_t *pEvent = reinterpret_cast<hipEvent_t *>(&event);
hipStream_t *pStream = reinterpret_cast<hipStream_t *>(&stream);
hipError_t result = hipEventRecord(*pEvent, *pStream);
checkCudaErrors(result);
if (result != 0)
throw std::runtime_error("hipEventRecord(...) failed");
return 1;
}
int NativeOps::setDevice(Nd4jPointer ptrToDeviceId) {
int deviceId = getDeviceId(ptrToDeviceId);
hipError_t result = hipSetDevice(deviceId);
checkCudaErrors(result);
if (result != 0)
throw std::runtime_error("hipSetDevice(...) failed");
return 1;
}
Nd4jLong NativeOps::getDeviceFreeMemory(Nd4jPointer ptrToDeviceId) {
int device = getDeviceId(ptrToDeviceId);
int orig = -1;
hipGetDevice(&orig);
if (device >= 0 && device != orig) {
hipSetDevice(device);
}
size_t memFree = 0;
size_t memTotal = 0;
hipMemGetInfo(&memFree, &memTotal);
if (device >= 0 && device != orig) {
hipSetDevice(orig);
}
return (Nd4jLong) memFree;
}
Nd4jLong NativeOps::getDeviceTotalMemory(Nd4jPointer ptrToDeviceId) {
int device = getDeviceId(ptrToDeviceId);
int orig = -1;
hipGetDevice(&orig);
if (device >= 0 && device != orig) {
hipSetDevice(device);
}
size_t memFree = 0;
size_t memTotal = 0;
hipMemGetInfo(&memFree, &memTotal);
if (device >= 0 && device != orig) {
hipSetDevice(orig);
}
return (Nd4jLong) memTotal;
}
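// Usage sketch (illustrative): both queries wrap a single hipMemGetInfo()
// call, temporarily switching to the target device if needed:
//
//    Nd4jLong freeB  = ops->getDeviceFreeMemory(ptrToDeviceId);
//    Nd4jLong totalB = ops->getDeviceTotalMemory(ptrToDeviceId);
//    printf("device memory: %lld of %lld bytes free\n",
//           (long long) freeB, (long long) totalB);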
int NativeOps::memcpy(Nd4jPointer dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) {
return memcpyAsync(dst, src, size, flags, reserved);
}
int NativeOps::memcpyAsync(Nd4jPointer dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) {
hipStream_t *pStream = reinterpret_cast<hipStream_t *>(&reserved);
hipMemcpyKind kind;
DEBUG_KERNEL(pStream, 0);
switch (flags) {
case 0: {
kind = hipMemcpyHostToHost;
}
break;
case 1: {
kind = hipMemcpyHostToDevice;
}
break;
case 2: {
kind = hipMemcpyDeviceToHost;
            }
                break;
case 3: {
kind = hipMemcpyDeviceToDevice;
}
break;
default: {
                printf("UNDEFINED MEMCPY!\n");
                return 0;
}
}
hipError_t result = hipMemcpyAsync(reinterpret_cast<void *>(dst), const_cast<const void *>(reinterpret_cast<void *>(src)), static_cast<size_t>(size), kind, *pStream);
if (result != 0) {
checkCudaErrors(result);
        printf("Failed on [%p] -> [%p], size: [%lld], direction: [%i], result: [%i]\n", reinterpret_cast<void *>(src), reinterpret_cast<void *>(dst), static_cast<long long>(size), flags, static_cast<int>(result));
fflush(stdout);
fflush(stderr);
throw std::runtime_error("hipMemcpyAsync(...) failed");
//return 0L;
}
return 1;
}
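// Hedged usage sketch: the `flags` argument selects the copy direction
// (0 = host->host, 1 = host->device, 2 = device->host, 3 = device->device).
// A host-to-device upload on a given stream would look like this
// (devBuf/hostBuf/streamPtr are illustrative names, not part of this file):
//
//    ops->memcpyAsync(devBuf, hostBuf, sizeInBytes, 1 /* H2D */, streamPtr);
//    ops->streamSynchronize(streamPtr);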
int NativeOps::memset(Nd4jPointer dst, int value, Nd4jLong size, int flags, Nd4jPointer reserved) {
hipError_t result = hipMemset(reinterpret_cast<void *>(dst), value, static_cast<size_t>(size));
checkCudaErrors(result);
if (result != 0)
throw std::runtime_error("hipMemset(...) failed");
return 1;
}
int NativeOps::memsetAsync(Nd4jPointer dst, int value, Nd4jLong size, int flags, Nd4jPointer reserved) {
hipStream_t *pStream = reinterpret_cast<hipStream_t *>(&reserved);
hipError_t result = hipMemsetAsync(reinterpret_cast<void *>(dst), value, static_cast<size_t>(size), *pStream);
checkCudaErrors(result);
if (result != 0)
throw std::runtime_error("hipMemsetAsync(...) failed");
return 1;
}
int NativeOps::destroyEvent(Nd4jPointer event) {
hipEvent_t *pEvent = reinterpret_cast<hipEvent_t *>(&event);
hipError_t result = hipEventDestroy(*pEvent);
checkCudaErrors(result);
if (result != 0)
throw std::runtime_error("cudaEvenDestroy(...) failed");
return 1;
}
int NativeOps::streamSynchronize(Nd4jPointer stream) {
hipStream_t *pStream = reinterpret_cast<hipStream_t *>(&stream);
hipError_t result = hipStreamSynchronize(*pStream);
checkCudaErrors(result);
if (result != 0)
throw std::runtime_error("hipStreamSynchronize(...) failed");
return 1L;
}
int NativeOps::eventSynchronize(Nd4jPointer event) {
hipEvent_t *pEvent = reinterpret_cast<hipEvent_t *>(&event);
hipError_t result = hipEventSynchronize(*pEvent);
checkCudaErrors(result);
if (result != 0)
throw std::runtime_error("hipEventSynchronize(...) failed");
return 1L;
}
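// Illustrative event lifecycle (hypothetical caller code): create an event,
// record it on a stream, block until the recorded work completes, then
// destroy it:
//
//    Nd4jPointer event = ops->createEvent();
//    ops->registerEvent(event, stream);   // hipEventRecord on that stream
//    ops->eventSynchronize(event);        // blocks until the record point
//    ops->destroyEvent(event);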
int NativeOps::getAvailableDevices() {
int devCnt = 0;
hipGetDeviceCount(&devCnt);
return devCnt;
}
void NativeOps::enableDebugMode(bool reallyEnable) {
nd4j::Environment::getInstance()->setDebug(reallyEnable);
}
void NativeOps::setGridLimit(int gridSize) {
if (gridSize > 8192)
gridSize = 8192;
if (gridSize < 1)
gridSize = 1;
blockLimit = gridSize;
}
int NativeOps::ompGetMaxThreads() {
return maxThreads;
}
int NativeOps::ompGetNumThreads() {
return maxThreads;
}
void NativeOps::setOmpNumThreads(int threads) {
if (threads > 1024)
threads = 1024;
if (threads < 32)
threads = 32;
maxThreads = threads;
}
void NativeOps::enableVerboseMode(bool reallyEnable) {
nd4j::Environment::getInstance()->setVerbose(reallyEnable);
}
int NativeOps::getDeviceMajor(Nd4jPointer ptrToDeviceId) {
int device = getDeviceId(ptrToDeviceId);
return deviceProperties[device].major;
}
int NativeOps::getDeviceMinor(Nd4jPointer ptrToDeviceId) {
int device = getDeviceId(ptrToDeviceId);
return deviceProperties[device].minor;
}
const char * NativeOps::getDeviceName(Nd4jPointer ptrToDeviceId) {
int device = getDeviceId(ptrToDeviceId);
return deviceProperties[device].name;
}
/**
 * Concatenate multiple arrays of the same shape
 * along a particular dimension
*/
void NativeOps::concatFloat(
Nd4jPointer *extraPointers,
int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfo,
float *result,
Nd4jLong *resultShapeInfo,
Nd4jPointer *tadPointers,
Nd4jPointer *offsetPointers) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostShapePointers = reinterpret_cast<Nd4jLong **>(extraPointers[9]);
    // numArrays will be used as the number of TADs, so each block processes 1 input
int smem = 0;
bool isVstack = false;
bool isScalar = true;
bool isHstack = false;
for (int i = 0; i < numArrays; i++) {
if (!shape::isScalar(hostShapePointers[i])) {
isScalar = false;
break;
}
}
if (!isScalar && dimension == 0 && shape::rank(hostXShapeInfo) == 2 && shape::order(hostXShapeInfo) == 'c' ) {
isVstack = true;
for (int i = 0; i < numArrays; i++) {
if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0 ||
shape::order(hostShapePointers[i]) != 'c') {
isVstack = false;
break;
}
}
}
// let's try to fit N-dimensional vstack
if (!isVstack && !isScalar && dimension == 0 && shape::order(hostXShapeInfo) == 'c') {
Nd4jLong length0 = shape::length(hostShapePointers[0]);
isVstack = true;
for (int i = 0; i < numArrays; i++) {
if (shape::elementWiseStride(hostShapePointers[i]) <= 0 || shape::order(hostShapePointers[i]) != 'c' || length0 != shape::length(hostShapePointers[i])) {
isVstack = false;
break;
}
}
}
if (!isScalar && !isVstack && dimension == 1 && shape::isVector(hostXShapeInfo)) {
isHstack = true;
for (int i = 0; i < numArrays; i++) {
if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0) {
isHstack = false;
break;
}
}
}
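    // At this point at most one of the fast-path flags is set:
    // isScalar - every input is a scalar, handled by a trivial gather kernel;
    // isVstack - dimension 0, 'c' order, vector-like (or equal-length) inputs,
    //            so each input becomes one contiguous row of the result;
    // isHstack - dimension 1 on vectors, so inputs are laid out back-to-back.
    // Anything else falls through to the generic TAD-based concat kernel.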
if (isScalar) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going scalar concat\n");
smem = funcAttributes[38].sharedSizeBytes;
hipLaunchKernelGGL(( concatKernelScalarFloat), dim3(128), dim3(128), smem, *stream, dimension, numArrays, reinterpret_cast<Nd4jPointer *>(data[0]), reinterpret_cast<Nd4jPointer *>(inputShapeInfo[0]), result, resultShapeInfo, reinterpret_cast<Nd4jPointer *>(tadPointers[0]), reinterpret_cast<Nd4jPointer *>(offsetPointers[0]));
} else if (isVstack) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going VStack concat\n");
smem = funcAttributes[40].sharedSizeBytes;
hipLaunchKernelGGL(( concatKernelVStackFloat), dim3(128), dim3(512), smem, *stream, dimension, numArrays, reinterpret_cast<Nd4jPointer *>(data[0]), reinterpret_cast<Nd4jPointer *>(inputShapeInfo[0]), result, resultShapeInfo, reinterpret_cast<Nd4jPointer *>(tadPointers[0]), reinterpret_cast<Nd4jPointer *>(offsetPointers[0]));
} else if (isHstack) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going HStack concat\n");
smem = funcAttributes[42].sharedSizeBytes;
hipLaunchKernelGGL(( concatKernelHStackFloat), dim3(128), dim3(128), smem, *stream, dimension, numArrays, reinterpret_cast<Nd4jPointer *>(data[0]), reinterpret_cast<Nd4jPointer *>(inputShapeInfo[0]), result, resultShapeInfo, reinterpret_cast<Nd4jPointer *>(tadPointers[0]), reinterpret_cast<Nd4jPointer *>(offsetPointers[0]));
} else {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going generic concat\n");
//smem = nd4j::math::nd4j_max<int>(funcAttributes[31].sharedSizeBytes + 768, 1280);
auto devZTadShape = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto devZOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
hipLaunchKernelGGL(( concatKernelFloat), dim3(512), dim3(512), 4096, *stream, dimension, numArrays, reinterpret_cast<Nd4jPointer *>(data[0]), reinterpret_cast<Nd4jPointer *>(inputShapeInfo[0]), result, resultShapeInfo, reinterpret_cast<Nd4jPointer *>(tadPointers[0]), reinterpret_cast<Nd4jPointer *>(offsetPointers[0]), devZTadShape, devZOffsets);
}
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("sharedMemory requested for concatFloat: [%i], registers: [%i]\n", smem, funcAttributes[31].numRegs);
nd4j::DebugHelper::checkErrorCode(stream, "Legacy ConcatFloat(...) failed");
}
void NativeOps::concatHalf(
Nd4jPointer *extraPointers,
int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfo,
float16 *result,
Nd4jLong *resultShapeInfo,
Nd4jPointer *tadPointers,
Nd4jPointer *offsetPointers) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostShapePointers = reinterpret_cast<Nd4jLong **>(extraPointers[9]);
    // numArrays will be used as the number of TADs, so each block processes 1 input
int smem = 0;
bool isVstack = false;
bool isScalar = true;
bool isHstack = false;
for (int i = 0; i < numArrays; i++) {
if (!shape::isScalar(hostShapePointers[i])) {
isScalar = false;
break;
}
}
if (!isScalar && dimension == 0 && shape::rank(hostXShapeInfo) == 2 && shape::order(hostXShapeInfo) == 'c' ) {
isVstack = true;
for (int i = 0; i < numArrays; i++) {
if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0 || shape::order(hostShapePointers[i]) != 'c') {
isVstack = false;
break;
}
}
}
// let's try to fit N-dimensional vstack
if (!isVstack && !isScalar && dimension == 0 && shape::order(hostXShapeInfo) == 'c') {
Nd4jLong length0 = shape::length(hostShapePointers[0]);
isVstack = true;
for (int i = 0; i < numArrays; i++) {
if (shape::elementWiseStride(hostShapePointers[i]) <= 0 || shape::order(hostShapePointers[i]) != 'c' || length0 != shape::length(hostShapePointers[i])) {
isVstack = false;
break;
}
}
}
if (!isScalar && !isVstack && dimension == 1 && shape::isVector(hostXShapeInfo)) {
isHstack = true;
for (int i = 0; i < numArrays; i++) {
if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0) {
isHstack = false;
break;
}
}
}
if (isScalar) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going scalar concat\n");
smem = funcAttributes[38].sharedSizeBytes;
hipLaunchKernelGGL(( concatKernelScalarHalf), dim3(128), dim3(128), smem, *stream, dimension, numArrays, reinterpret_cast<Nd4jPointer *>(data[0]), reinterpret_cast<Nd4jPointer *>(inputShapeInfo[0]), result, resultShapeInfo, reinterpret_cast<Nd4jPointer *>(tadPointers[0]), reinterpret_cast<Nd4jPointer *>(offsetPointers[0]));
} else if (isVstack) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going VStack concat\n");
smem = funcAttributes[40].sharedSizeBytes;
hipLaunchKernelGGL(( concatKernelVStackHalf), dim3(128), dim3(128), smem, *stream, dimension, numArrays, reinterpret_cast<Nd4jPointer *>(data[0]), reinterpret_cast<Nd4jPointer *>(inputShapeInfo[0]), result, resultShapeInfo, reinterpret_cast<Nd4jPointer *>(tadPointers[0]), reinterpret_cast<Nd4jPointer *>(offsetPointers[0]));
} else if (isHstack) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going HStack concat\n");
smem = funcAttributes[42].sharedSizeBytes;
hipLaunchKernelGGL(( concatKernelHStackHalf), dim3(128), dim3(128), smem, *stream, dimension, numArrays, reinterpret_cast<Nd4jPointer *>(data[0]), reinterpret_cast<Nd4jPointer *>(inputShapeInfo[0]), result, resultShapeInfo, reinterpret_cast<Nd4jPointer *>(tadPointers[0]), reinterpret_cast<Nd4jPointer *>(offsetPointers[0]));
} else {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going generic concat\n");
//smem = nd4j::math::nd4j_max<int>(funcAttributes[31].sharedSizeBytes + 768, 1280);
auto devZTadShape = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto devZOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
hipLaunchKernelGGL(( concatKernelHalf), dim3(512), dim3(128), 4096, *stream, dimension, numArrays, reinterpret_cast<Nd4jPointer *>(data[0]), reinterpret_cast<Nd4jPointer *>(inputShapeInfo[0]), result, resultShapeInfo, reinterpret_cast<Nd4jPointer *>(tadPointers[0]), reinterpret_cast<Nd4jPointer *>(offsetPointers[0]), devZTadShape, devZOffsets);
}
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("sharedMemory requested for concatHalf: [%i], registers: [%i]\n", smem, funcAttributes[31].numRegs);
nd4j::DebugHelper::checkErrorCode(stream, "ConcatHalf(...) failed");
}
void NativeOps::specialConcatFloat(
Nd4jPointer *extraPointers,
int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfo,
float *result,
Nd4jLong *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) {
nd4j::SpecialMethods<float>::concatCpuGeneric(
dimension,
numArrays,
data,
inputShapeInfo,
result,
resultShapeInfo);
}
void NativeOps::specialConcatHalf(
Nd4jPointer *extraPointers,
int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfo,
float16 *result,
Nd4jLong *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) {
nd4j::SpecialMethods<float16>::concatCpuGeneric(
dimension,
numArrays,
data,
inputShapeInfo,
result,
resultShapeInfo);
}
/**
 * Concatenate multiple arrays of the same shape
 * along a particular dimension
*/
void NativeOps::specialConcatDouble(
Nd4jPointer *extraPointers,
int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfo,
double *result,
Nd4jLong *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) {
nd4j::SpecialMethods<double>::concatCpuGeneric(
dimension,
numArrays,
data,
inputShapeInfo,
result,
resultShapeInfo);
}
/**
 * Concatenate multiple arrays of the same shape
 * along a particular dimension
*/
void NativeOps::concatDouble(
Nd4jPointer *extraPointers,
int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfo,
double *result,
Nd4jLong *resultShapeInfo,
Nd4jPointer *tadPointers,
Nd4jPointer *offsetPointers) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostShapePointers = reinterpret_cast<Nd4jLong **>(extraPointers[9]);
    // numArrays will be used as the number of TADs, so each block processes 1 input
int smem = 0;
bool isVstack = false;
bool isScalar = true;
bool isHstack = false;
for (int i = 0; i < numArrays; i++) {
if (!shape::isScalar(hostShapePointers[i])) {
isScalar = false;
break;
}
}
if (!isScalar && dimension == 0 && shape::rank(hostXShapeInfo) == 2 && shape::order(hostXShapeInfo) == 'c' ) {
isVstack = true;
for (int i = 0; i < numArrays; i++) {
if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0 || shape::order(hostShapePointers[i]) != 'c') {
isVstack = false;
break;
}
}
}
// let's try to fit N-dimensional vstack
if (!isVstack && !isScalar && dimension == 0 && shape::order(hostXShapeInfo) == 'c') {
Nd4jLong length0 = shape::length(hostShapePointers[0]);
isVstack = true;
for (int i = 0; i < numArrays; i++) {
if (shape::elementWiseStride(hostShapePointers[i]) <= 0 || shape::order(hostShapePointers[i]) != 'c' || length0 != shape::length(hostShapePointers[i])) {
isVstack = false;
break;
}
}
}
if (!isScalar && !isVstack && dimension == 1 && shape::isVector(hostXShapeInfo)) {
isHstack = true;
for (int i = 0; i < numArrays; i++) {
if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0) {
isHstack = false;
break;
}
}
}
if (isScalar) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going scalar concat\n");
smem = funcAttributes[39].sharedSizeBytes;
hipLaunchKernelGGL(( concatKernelScalarDouble), dim3(128), dim3(128), smem, *stream, dimension, numArrays, reinterpret_cast<Nd4jPointer *>(data[0]), reinterpret_cast<Nd4jPointer *>(inputShapeInfo[0]), result, resultShapeInfo, reinterpret_cast<Nd4jPointer *>(tadPointers[0]), reinterpret_cast<Nd4jPointer *>(offsetPointers[0]));
} else if (isVstack) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going VStack concat\n");
smem = funcAttributes[41].sharedSizeBytes;
hipLaunchKernelGGL(( concatKernelVStackDouble), dim3(128), dim3(128), smem, *stream, dimension, numArrays, reinterpret_cast<Nd4jPointer *>(data[0]), reinterpret_cast<Nd4jPointer *>(inputShapeInfo[0]), result, resultShapeInfo, reinterpret_cast<Nd4jPointer *>(tadPointers[0]), reinterpret_cast<Nd4jPointer *>(offsetPointers[0]));
} else if (isHstack) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going HStack concat\n");
smem = funcAttributes[43].sharedSizeBytes;
hipLaunchKernelGGL(( concatKernelHStackDouble), dim3(128), dim3(128), smem, *stream, dimension, numArrays, reinterpret_cast<Nd4jPointer *>(data[0]), reinterpret_cast<Nd4jPointer *>(inputShapeInfo[0]), result, resultShapeInfo, reinterpret_cast<Nd4jPointer *>(tadPointers[0]), reinterpret_cast<Nd4jPointer *>(offsetPointers[0]));
} else {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going generic concat\n");
auto devZTadShape = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto devZOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
hipLaunchKernelGGL(( concatKernelDouble), dim3(512), dim3(128), 4096, *stream, dimension, numArrays, reinterpret_cast<Nd4jPointer *>(data[0]), reinterpret_cast<Nd4jPointer *>(inputShapeInfo[0]), result, resultShapeInfo, reinterpret_cast<Nd4jPointer *>(tadPointers[0]), reinterpret_cast<Nd4jPointer *>(offsetPointers[0]), devZTadShape, devZOffsets);
}
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("sharedMemory requested for concatDouble: [%i], registers: [%i]\n", smem, funcAttributes[31].numRegs);
nd4j::DebugHelper::checkErrorCode(stream, "ConcatDouble(...) failed");
}
/**
 * This method calculates TAD-only shape information and offsets for the given
 * dimensions and copies them into the caller-provided target/offsets buffers
*/
void NativeOps::tadOnlyShapeInfo(Nd4jLong *xShapeInfo, int *dimension, int dimensionLength, Nd4jLong *target, Nd4jLong *offsets) {
shape::TAD tad;
tad.init(xShapeInfo, dimension, dimensionLength);
//tad->setOutputBuffer(target);
tad.createTadOnlyShapeInfo();
tad.createOffsets();
std::memcpy(reinterpret_cast<void *>(target), tad.tadOnlyShapeInfo, shape::shapeInfoByteLength(tad.tadOnlyShapeInfo));
std::memcpy(reinterpret_cast<void *>(offsets), tad.tadOffsets, tad.numTads * sizeof(Nd4jLong));
}
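// Worked example (illustrative): for xShapeInfo describing a [2, 3, 4] array
// with dimension = {1, 2} (dimensionLength = 2), the TAD shape is [3, 4] and
// numTads = 2, so `target` receives the shape info of a single 3x4 slice and
// `offsets` receives two entries - the linear offset of each slice along
// dimension 0.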
int NativeOps::memcpyConstantAsync(Nd4jLong dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) {
hipStream_t *pStream = reinterpret_cast<hipStream_t *>(&reserved);
hipMemcpyKind kind;
DEBUG_KERNEL(pStream, -1);
switch (flags) {
case 0: {
kind = hipMemcpyHostToHost;
}
break;
case 1: {
kind = hipMemcpyHostToDevice;
}
break;
case 2: {
kind = hipMemcpyDeviceToHost;
            }
                break;
case 3: {
kind = hipMemcpyDeviceToDevice;
}
break;
}
//hipError_t result = hipMemcpyAsync((void *) dst, (const void *) src, (size_t) size, kind, *pStream);
hipError_t result = hipMemcpyToSymbolAsync(deviceConstantMemory, const_cast<const void *>(src), size, dst, kind, *pStream);
checkCudaErrors(result);
if (result != 0)
throw std::runtime_error("hipMemcpyToSymbolAsync(...) failed");
return 1;
}
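// Hedged usage sketch: note that `dst` is a byte offset into the
// deviceConstantMemory symbol, not a device pointer, so uploading host data
// to the start of constant space looks like (hostBuf/streamPtr are
// illustrative names):
//
//    ops->memcpyConstantAsync(0L, hostBuf, sizeInBytes, 1 /* H2D */, streamPtr);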
Nd4jPointer NativeOps::getConstantSpace() {
Nd4jPointer dConstAddr;
hipError_t result = hipGetSymbolAddress(reinterpret_cast<void **>(&dConstAddr), deviceConstantMemory);
if (result != 0)
throw std::runtime_error("hipGetSymbolAddress(...) failed");
return dConstAddr;
}
void NativeOps::pullRowsHalf(Nd4jPointer *extraPointers, float16 *x, Nd4jLong *xShapeInfo, float16 *z, Nd4jLong *zShapeInfo, Nd4jLong n, Nd4jLong *indexes, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *zTadShapeInfo, Nd4jLong *zTadOffsets) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
hipLaunchKernelGGL(( pullRowsKernelHalf), dim3(64), dim3(256), 1024, *stream, x, xShapeInfo, z, zShapeInfo, n, indexes, tadShapeInfo, tadOffsets, zTadShapeInfo, zTadOffsets);
DEBUG_KERNEL(stream, -1);
}
void NativeOps::pullRowsFloat(Nd4jPointer *extraPointers, float *x, Nd4jLong *xShapeInfo, float *z, Nd4jLong *zShapeInfo, Nd4jLong n, Nd4jLong *indexes, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *zTadShapeInfo, Nd4jLong *zTadOffsets) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
hipLaunchKernelGGL(( pullRowsKernelFloat), dim3(64), dim3(256), 1024, *stream, x, xShapeInfo, z, zShapeInfo, n, indexes, tadShapeInfo, tadOffsets, zTadShapeInfo, zTadOffsets);
DEBUG_KERNEL(stream, -1);
}
void NativeOps::pullRowsDouble(Nd4jPointer *extraPointers, double *x, Nd4jLong *xShapeInfo, double *z, Nd4jLong *zShapeInfo, Nd4jLong n, Nd4jLong *indexes, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *zTadShapeInfo, Nd4jLong *zTadOffsets) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
hipLaunchKernelGGL(( pullRowsKernelDouble), dim3(64), dim3(256), 1024, *stream, x, xShapeInfo, z, zShapeInfo, n, indexes, tadShapeInfo, tadOffsets, zTadShapeInfo, zTadOffsets);
DEBUG_KERNEL(stream, -1);
}
void NativeOps::averageHalf(Nd4jPointer *extras, Nd4jPointer *dx, float16 *dz, int n, Nd4jLong length, bool propagate) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]);
int mode = getDeviceId(extras[3]);
float16 **x = reinterpret_cast<float16 **>(dx);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("averageHalf called\n");
// launching on gpu
if (mode == 0) {
dim3 launchDims = getBasicLaunchParams(getDeviceId(extras[2]), length, sizeof(float16), funcAttributes[44]);
hipLaunchKernelGGL(( averagingKernelHalf), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, dz, n, length, propagate);
nd4j::DebugHelper::checkErrorCode(stream, "AverageHalf(...) failed");
} else {
nd4j::SpecialMethods<float16>::averageGeneric(x, dz, n, length, propagate);
}
}
void NativeOps::averageFloat(Nd4jPointer *extras, Nd4jPointer *dx, float *dz, int n, Nd4jLong length, bool propagate) {
hipStream_t * stream = reinterpret_cast<hipStream_t *>(&extras[1]);
int mode = getDeviceId(extras[3]);
float **x = reinterpret_cast<float **>(dx);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("averageFloat called\n");
// launching on gpu
if (mode == 0) {
dim3 launchDims = getBasicLaunchParams(getDeviceId(extras[2]), length, sizeof(float), funcAttributes[45]);
hipLaunchKernelGGL(( averagingKernelFloat), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, dz, n, length, propagate);
nd4j::DebugHelper::checkErrorCode(stream, "AverageFloat(...) failed");
} else {
// launching on host memory
nd4j::SpecialMethods<float>::averageGeneric(x, dz, n, length, propagate);
}
}
void NativeOps::averageDouble(Nd4jPointer *extras, Nd4jPointer *dx, double *dz, int n, Nd4jLong length, bool propagate) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]);
int mode = getDeviceId(extras[3]);
double **x = reinterpret_cast<double **>(dx);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("averageDouble called\n");
// launching on gpu
if (mode == 0) {
dim3 launchDims = getBasicLaunchParams(getDeviceId(extras[2]), length, sizeof(double), funcAttributes[46]);
        hipLaunchKernelGGL(( averagingKernelDouble), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, dz, n, length, propagate);
nd4j::DebugHelper::checkErrorCode(stream, "AverageDouble(...) failed");
} else {
nd4j::SpecialMethods<double>::averageGeneric(x, dz, n, length, propagate);
}
}
void NativeOps::accumulateHalf(Nd4jPointer *extras, Nd4jPointer *dx, float16 *dz, int n, Nd4jLong length) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]);
int mode = getDeviceId(extras[3]);
float16 **x = reinterpret_cast<float16 **>(dx);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("accumulateHalf called\n");
// launching on gpu
if (mode == 0) {
dim3 launchDims = getBasicLaunchParams(getDeviceId(extras[2]), length, sizeof(float16), funcAttributes[44]);
hipLaunchKernelGGL(( accumulateKernelHalf), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, dz, n, length);
nd4j::DebugHelper::checkErrorCode(stream, "AccumulateHalf(...) failed");
} else {
nd4j::SpecialMethods<float16>::accumulateGeneric(x, dz, n, length);
}
}
void NativeOps::accumulateFloat(Nd4jPointer *extras, Nd4jPointer *dx, float *dz, int n, Nd4jLong length) {
hipStream_t * stream = reinterpret_cast<hipStream_t *>(&extras[1]);
int mode = getDeviceId(extras[3]);
float **x = reinterpret_cast<float **>(dx);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("accumulateFloat called\n");
// launching on gpu
if (mode == 0) {
dim3 launchDims = getBasicLaunchParams(getDeviceId(extras[2]), length, sizeof(float), funcAttributes[45]);
hipLaunchKernelGGL(( accumulateKernelFloat), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, dz, n, length);
nd4j::DebugHelper::checkErrorCode(stream, "AccumulateFloat(...) failed");
} else {
// launching on host memory
nd4j::SpecialMethods<float>::accumulateGeneric(x, dz, n, length);
}
}
void NativeOps::accumulateDouble(Nd4jPointer *extras, Nd4jPointer *dx, double *dz, int n, Nd4jLong length) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]);
int mode = getDeviceId(extras[3]);
double **x = reinterpret_cast<double **>(dx);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("accumulateDouble called\n");
// launching on gpu
if (mode == 0) {
dim3 launchDims = getBasicLaunchParams(getDeviceId(extras[2]), length, sizeof(double), funcAttributes[46]);
        hipLaunchKernelGGL(( accumulateKernelDouble), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, dz, n, length);
nd4j::DebugHelper::checkErrorCode(stream, "AccumulateDouble(...) failed");
} else {
nd4j::SpecialMethods<double>::accumulateGeneric(x, dz, n, length);
}
}
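// Note (inferred from the kernel names and the SpecialMethods fallbacks):
// the accumulate* family sums the n input arrays element-wise into dz, while
// the average* family above additionally divides by n (and, when `propagate`
// is set, writes the mean back into the inputs). Both families fall back to
// the host-side SpecialMethods implementation when mode != 0.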
void NativeOps::shuffleDouble(Nd4jPointer *extras, Nd4jPointer *dx, Nd4jPointer *xShapeInfo, Nd4jPointer *dz, Nd4jPointer *zShapeInfo, int N, int *shuffleMap, Nd4jPointer *tadShapeInfo, Nd4jPointer *tadOffsets) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]);
double **x = reinterpret_cast<double **>(dx);
double **z = reinterpret_cast<double **>(dz);
auto xShape = reinterpret_cast<Nd4jLong **>(xShapeInfo);
auto zShape = reinterpret_cast<Nd4jLong **>(zShapeInfo);
auto tadOnlyShapeInfo = reinterpret_cast<Nd4jLong **>(tadShapeInfo);
auto tadOffset = reinterpret_cast<Nd4jLong **>(tadOffsets);
hipLaunchKernelGGL(( shuffleKernelDouble), dim3(32), dim3(128), 2048, *stream, x, xShape, z, zShape, N, shuffleMap, tadOnlyShapeInfo, tadOffset);
DEBUG_KERNEL(stream, 0);
}
void NativeOps::shuffleFloat(Nd4jPointer *extras, Nd4jPointer *dx, Nd4jPointer *xShapeInfo, Nd4jPointer *dz, Nd4jPointer *zShapeInfo, int N, int *shuffleMap, Nd4jPointer *tadShapeInfo, Nd4jPointer *tadOffsets) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]);
float **x = reinterpret_cast<float **>(dx);
float **z = reinterpret_cast<float **>(dz);
auto xShape = reinterpret_cast<Nd4jLong **>(xShapeInfo);
auto zShape = reinterpret_cast<Nd4jLong **>(zShapeInfo);
auto tadOnlyShapeInfo = reinterpret_cast<Nd4jLong **>(tadShapeInfo);
auto tadOffset = reinterpret_cast<Nd4jLong **>(tadOffsets);
hipLaunchKernelGGL(( shuffleKernelFloat), dim3(32), dim3(128), 2048, *stream, x, xShape, z, zShape, N, shuffleMap, tadOnlyShapeInfo, tadOffset);
DEBUG_KERNEL(stream, 0);
}
void NativeOps::shuffleHalf(Nd4jPointer *extras, Nd4jPointer *dx, Nd4jPointer *xShapeInfo, Nd4jPointer *dz, Nd4jPointer *zShapeInfo, int N, int *shuffleMap, Nd4jPointer *tadShapeInfo, Nd4jPointer *tadOffsets) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]);
float16 **x = reinterpret_cast<float16 **>(dx);
float16 **z = reinterpret_cast<float16 **>(dz);
auto xShape = reinterpret_cast<Nd4jLong **>(xShapeInfo);
auto zShape = reinterpret_cast<Nd4jLong **>(zShapeInfo);
auto tadOnlyShapeInfo = reinterpret_cast<Nd4jLong **>(tadShapeInfo);
auto tadOffset = reinterpret_cast<Nd4jLong **>(tadOffsets);
hipLaunchKernelGGL(( shuffleKernelHalf), dim3(32), dim3(128), 2048, *stream, x, xShape, z, zShape, N, shuffleMap, tadOnlyShapeInfo, tadOffset);
DEBUG_KERNEL(stream, 0);
}
void NativeOps::execMetaPredicateStridedFloat(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, Nd4jLong N, float *dx, Nd4jLong xStride, float *dy, Nd4jLong yStride, float *dz, Nd4jLong zStride, float *extraA, float *extraB, float scalarA, float scalarB) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]);
functions::grid::GRIDStrided<float>::execMetaPredicateStrided(stream, extras, opTypeA, opNumA, opTypeB, opNumB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalarA, scalarB);
DEBUG_KERNEL(stream, opNumA);
}
void NativeOps::execMetaPredicateStridedDouble(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, Nd4jLong N, double *dx, Nd4jLong xStride, double *dy, Nd4jLong yStride, double *dz, Nd4jLong zStride, double *extraA, double *extraB, double scalarA, double scalarB) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]);
functions::grid::GRIDStrided<double>::execMetaPredicateStrided(stream, extras, opTypeA, opNumA, opTypeB, opNumB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalarA, scalarB);
DEBUG_KERNEL(stream, opNumA);
}
void NativeOps::execMetaPredicateStridedHalf(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, Nd4jLong N, float16 *dx, Nd4jLong xStride, float16 *dy, Nd4jLong yStride, float16 *dz, Nd4jLong zStride, float16 *extraA, float16 *extraB, float scalarA, float scalarB) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]);
float16 scalA = (float16) scalarA;
float16 scalB = (float16) scalarB;
    functions::grid::GRIDStrided<float16>::execMetaPredicateStrided(stream, extras, opTypeA, opNumA, opTypeB, opNumB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalA, scalB);
DEBUG_KERNEL(stream, opNumA);
}
void NativeOps::execMetaPredicateReduceFloat(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, float *dx, Nd4jLong *xShapeInfo, float *dy, Nd4jLong *yShapeInfo, float *dz, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, float *extraA, float *extraB, float scalarA, float scalarB, bool scalarReturned) {
// no-op
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]);
/*
metaPredicateReduceFloat(const int opTypeA, const int opNumA, const int opTypeB, const int opNumB,
float *dx, int *xShapeInfo, float *dy, int *yShapeInfo, float *dz, int *zShapeInfo, int *tadShapeInfo, int *tadOffsets, float *reductionBuffer, float *extraA, float *extraB, float scalarA, float scalarB) {
*/
// metaPredicateReduceFloat<<<256, 256, 1024, *stream>>>(opTypeA, opNumA, opTypeB, opNumB, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, nullptr, extraA, extraB, scalarA, scalarB, scalarReturned);
DEBUG_KERNEL(stream, opNumA);
}
void NativeOps::execMetaPredicateShapeDouble(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, Nd4jLong N, double *dx, Nd4jLong *xShapeInfo, double *dy, Nd4jLong *yShapeInfo, double *dz, Nd4jLong *zShapeInfo, double *extraA, double *extraB, double scalarA, double scalarB) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]);
functions::grid::GRIDShaped<double>::execMetaPredicateShaped(stream, extras, opTypeA, opNumA, opTypeB, opNumB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB);
DEBUG_KERNEL(stream, opNumA);
}
void NativeOps::execMetaPredicateShapeHalf(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, Nd4jLong N, float16 *dx, Nd4jLong *xShapeInfo, float16 *dy, Nd4jLong *yShapeInfo, float16 *dz, Nd4jLong *zShapeInfo, float16 *extraA, float16 *extraB, float scalarA, float scalarB) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]);
    // we have to convert float -> fp16 prior to the kernel call
float16 scalA = (float16) scalarA;
float16 scalB = (float16) scalarB;
    functions::grid::GRIDShaped<float16>::execMetaPredicateShaped(stream, extras, opTypeA, opNumA, opTypeB, opNumB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalA, scalB);
DEBUG_KERNEL(stream, opNumA);
}
void NativeOps::execMetaPredicateShapeFloat(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, Nd4jLong N, float *dx, Nd4jLong *xShapeInfo, float *dy, Nd4jLong *yShapeInfo, float *dz, Nd4jLong *zShapeInfo, float *extraA, float *extraB, float scalarA, float scalarB) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]);
functions::grid::GRIDShaped<float>::execMetaPredicateShaped(stream, extras, opTypeA, opNumA, opTypeB, opNumB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB);
DEBUG_KERNEL(stream, opNumA);
}
bool NativeOps::isExperimentalEnabled() {
return experimentalSupport;
}
void NativeOps::setOmpMinThreads(int threads) {
minThreads = nd4j::math::nd4j_max<int>(32, threads);
minThreads = nd4j::math::nd4j_min<int>(maxThreads, minThreads);
}
int NativeOps::getDevice() {
int curDevice = -1;
hipGetDevice(&curDevice);
return curDevice;
}
void NativeOps::setElementThreshold(int num) {
// this is no-op for CUDA
}
void NativeOps::setTADThreshold(int num) {
// this is no-op for CUDA
}
void NativeOps::execScalarFloat(Nd4jPointer *extraPointers,int opNum,
float *x,
Nd4jLong *xShapeInfo,
float *z,
Nd4jLong *zShapeInfo,
float *scalars,
float *extraParams,
int *dimension,
int dimensionLength) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostTadShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
//dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]),hostXShapeInfo, hostTadShapeInfo, funcAttributes[47] ,dimensionLength, sizeof(float), 0);
dim3 launchDims = dim3(256, 256, 1024);
functions::scalar::ScalarTransform<float>::executeCudaAlongDimension(launchDims, extraPointers, opNum, x, xShapeInfo, z, zShapeInfo, scalars, extraParams, dimension, dimensionLength);
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::execScalarDouble(Nd4jPointer *extraPointers,int opNum,
double *x,
Nd4jLong *xShapeInfo,
double *z,
Nd4jLong *zShapeInfo,
double *scalars,
double *extraParams,
int *dimension,
int dimensionLength) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = dim3(256, 256, 1024);
functions::scalar::ScalarTransform<double>::executeCudaAlongDimension(launchDims, extraPointers, opNum, x, xShapeInfo, z, zShapeInfo, scalars, extraParams, dimension, dimensionLength);
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::execScalarHalf(Nd4jPointer *extraPointers,int opNum,
float16 *x,
Nd4jLong *xShapeInfo,
float16 *z,
Nd4jLong *zShapeInfo,
float16 *scalars,
float16 *extraParams,
int *dimension,
int dimensionLength) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = dim3(256, 256, 1024);
functions::scalar::ScalarTransform<float16>::executeCudaAlongDimension(launchDims, extraPointers, opNum, x, xShapeInfo, z, zShapeInfo, scalars, extraParams, dimension, dimensionLength);
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::execAggregateFloat(Nd4jPointer *extraPointers,int opNum,
float **arguments,
int numArguments,
Nd4jLong **shapes,
int numShapes,
int *indexArguments,
int numIndexArguments,
int **intArrays,
int numIntArrays,
float *realArguments,
int numRealArguments) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int numBlocks = getDeviceId(extraPointers[2]);
int numThreads = getDeviceId(extraPointers[3]);
int shmem = getDeviceId(extraPointers[4]);
dim3 launchDims = dim3(numBlocks, numThreads, shmem);
    // this macro builds a bunch of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(aggregateSimple, float, PARAMS(arguments, numArguments, shapes, numShapes, indexArguments, numIndexArguments, intArrays, numIntArrays, realArguments, numRealArguments), OPS_A(AGGREGATE_OPS))
nd4j::DebugHelper::checkErrorCode(stream, "execAggregateFloat(...) failed");
}
void NativeOps::execAggregateDouble(Nd4jPointer *extraPointers,int opNum,
double **arguments,
int numArguments,
Nd4jLong **shapes,
int numShapes,
int *indexArguments,
int numIndexArguments,
int **intArrays,
int numIntArrays,
double *realArguments,
int numRealArguments) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int numBlocks = getDeviceId(extraPointers[2]);
int numThreads = getDeviceId(extraPointers[3]);
int shmem = getDeviceId(extraPointers[4]);
dim3 launchDims = dim3(numBlocks, numThreads, shmem);
    // this macro builds a bunch of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(aggregateSimple, double, PARAMS(arguments, numArguments, shapes, numShapes, indexArguments, numIndexArguments, intArrays, numIntArrays, realArguments, numRealArguments), OPS_A(AGGREGATE_OPS))
nd4j::DebugHelper::checkErrorCode(stream, "execAggregateDouble(...) failed");
}
void NativeOps::execAggregateHalf(Nd4jPointer *extraPointers,int opNum,
float16 **arguments,
int numArguments,
Nd4jLong **shapes,
int numShapes,
int *indexArguments,
int numIndexArguments,
int **intArrays,
int numIntArrays,
float16 *realArguments,
int numRealArguments) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int numBlocks = getDeviceId(extraPointers[2]);
int numThreads = getDeviceId(extraPointers[3]);
int shmem = getDeviceId(extraPointers[4]);
dim3 launchDims = dim3(numBlocks, numThreads, shmem);
    // this macro builds a bunch of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(aggregateSimple, float16, PARAMS(arguments, numArguments, shapes, numShapes, indexArguments, numIndexArguments, intArrays, numIntArrays, realArguments, numRealArguments), OPS_A(AGGREGATE_OPS))
nd4j::DebugHelper::checkErrorCode(stream, "execAggregateHalf(...) failed");
}
void NativeOps::execAggregateBatchFloat(Nd4jPointer *extraPointers, int numAggregates, int opNum, int maxArgs, int maxShapes, int maxIntArrays, int maxIntArraySize, int maxIdx, int maxReals, void *ptrToArguments) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int numBlocks = getDeviceId(extraPointers[2]);
int numThreads = getDeviceId(extraPointers[3]);
int shmem = getDeviceId(extraPointers[4]);
dim3 launchDims = dim3(numAggregates, numThreads, shmem);
    // this macro builds a bunch of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(aggregateBatchSimple, float, PARAMS(numAggregates, opNum, maxArgs, maxShapes, maxIntArrays, maxIntArraySize, maxIdx, maxReals, ptrToArguments), OPS_A(AGGREGATE_OPS))
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::execAggregateBatchDouble(Nd4jPointer *extraPointers, int numAggregates, int opNum, int maxArgs, int maxShapes, int maxIntArrays, int maxIntArraySize, int maxIdx, int maxReals, void *ptrToArguments) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int numBlocks = getDeviceId(extraPointers[2]);
int numThreads = getDeviceId(extraPointers[3]);
int shmem = getDeviceId(extraPointers[4]);
dim3 launchDims = dim3(numAggregates, numThreads, shmem);
    // this macro builds a bunch of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(aggregateBatchSimple, double, PARAMS(numAggregates, opNum, maxArgs, maxShapes, maxIntArrays, maxIntArraySize, maxIdx, maxReals, ptrToArguments), OPS_A(AGGREGATE_OPS))
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::execAggregateBatchHalf(Nd4jPointer *extraPointers, int numAggregates, int opNum, int maxArgs, int maxShapes, int maxIntArrays, int maxIntArraySize, int maxIdx, int maxReals, void *ptrToArguments) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int numBlocks = getDeviceId(extraPointers[2]);
int numThreads = getDeviceId(extraPointers[3]);
int shmem = getDeviceId(extraPointers[4]);
dim3 launchDims = dim3(numAggregates, numThreads, shmem);
    // this macro builds a bunch of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(aggregateBatchSimple, float16, PARAMS(numAggregates, opNum, maxArgs, maxShapes, maxIntArrays, maxIntArraySize, maxIdx, maxReals, ptrToArguments), OPS_A(AGGREGATE_OPS))
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::execRandomFloat(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, float *z, Nd4jLong *zShapeBuffer, float *extraArguments) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(float)) );
functions::random::RandomFunction<float>::executeCudaSingle(launchDims, extraPointers, opNum, stateHost, z, zShapeBuffer, extraArguments);
}
void NativeOps::execRandomFloat(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, float *x, Nd4jLong *xShapeBuffer, float *y, Nd4jLong *yShapeBuffer, float *z, Nd4jLong *zShapeBuffer, float *extraArguments) {
dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(float)) );
functions::random::RandomFunction<float>::executeCudaTriple(launchDims, extraPointers, opNum, stateHost, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, extraArguments);
}
void NativeOps::execRandomFloat(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, float *x, Nd4jLong *xShapeBuffer, float *z, Nd4jLong *zShapeBuffer, float *extraArguments) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(float)) );
functions::random::RandomFunction<float>::executeCudaDouble(launchDims, extraPointers, opNum, stateHost, x, xShapeBuffer, z, zShapeBuffer, extraArguments);
}
void NativeOps::execRandomDouble(Nd4jPointer *extraPointers, int opNum, Nd4jPointer state, double *z, Nd4jLong *zShapeBuffer, double *extraArguments) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(double)));
functions::random::RandomFunction<double>::executeCudaSingle(launchDims, extraPointers, opNum, state, z, zShapeBuffer, extraArguments);
}
void NativeOps::execRandomDouble(Nd4jPointer *extraPointers, int opNum, Nd4jPointer state, double *x, Nd4jLong *xShapeBuffer, double *y, Nd4jLong *yShapeBuffer, double *z, Nd4jLong *zShapeBuffer, double *extraArguments) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(double)));
functions::random::RandomFunction<double>::executeCudaTriple(launchDims, extraPointers, opNum, state, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, extraArguments);
}
void NativeOps::execRandomDouble(Nd4jPointer *extraPointers, int opNum, Nd4jPointer state, double *x, Nd4jLong *xShapeBuffer, double *z, Nd4jLong *zShapeBuffer, double *extraArguments) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(double)));
functions::random::RandomFunction<double>::executeCudaDouble(launchDims, extraPointers, opNum, state, x, xShapeBuffer, z, zShapeBuffer, extraArguments);
}
void NativeOps::execRandomHalf(Nd4jPointer *extraPointers, int opNum, Nd4jPointer state, float16 *z, Nd4jLong *zShapeBuffer, float16 *extraArguments) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(float16)));
functions::random::RandomFunction<float16>::executeCudaSingle(launchDims, extraPointers, opNum, state, z, zShapeBuffer, extraArguments);
}
void NativeOps::execRandomHalf(Nd4jPointer *extraPointers, int opNum, Nd4jPointer state, float16 *x, Nd4jLong *xShapeBuffer, float16 *y, Nd4jLong *yShapeBuffer, float16 *z, Nd4jLong *zShapeBuffer, float16 *extraArguments) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(float16)));
functions::random::RandomFunction<float16>::executeCudaTriple(launchDims, extraPointers, opNum, state, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, extraArguments);
}
void NativeOps::execRandomHalf(Nd4jPointer *extraPointers, int opNum, Nd4jPointer state, float16 *x, Nd4jLong *xShapeBuffer, float16 *z, Nd4jLong *zShapeBuffer, float16 *extraArguments) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(float16)));
functions::random::RandomFunction<float16>::executeCudaDouble(launchDims, extraPointers, opNum, state, x, xShapeBuffer, z, zShapeBuffer, extraArguments);
}
Nd4jPointer NativeOps::initRandom(Nd4jPointer *extraPointers, long seed, long bufferSize, Nd4jPointer ptrToBuffer) {
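// flow: wrap the host and device buffers into a RandomBuffer, propagate the state
// struct to the device, generate the sequence on the host with Xoroshiro128, then
// async-copy it to the GPU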
unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
// we don't synchronize at random initialization; it's safe to stay unsynchronized here
// hipStreamSynchronize(*stream);
auto ptrDev = reinterpret_cast<unsigned long long *>(ptrToBuffer);
auto buffer = new nd4j::random::RandomBuffer(seed, bufferSize, reinterpret_cast<uint64_t *>(ptrHost), reinterpret_cast<uint64_t *>(ptrDev));
buffer->propagateToDevice(buffer, *stream);
nd4j::DebugHelper::checkErrorCode(stream, "initRandom(...) failed A");
// we generate the sequence in host memory
nd4j::random::Xoroshiro128 generator(buffer);
generator.refreshBuffer();
// and copy it to gpu
hipMemcpyAsync(ptrDev, ptrHost, bufferSize * 8, hipMemcpyHostToDevice, *stream);
nd4j::DebugHelper::checkErrorCode(stream, "initRandom(...) failed B");
return buffer;
}
void NativeOps::destroyRandom(Nd4jPointer ptrBuffer) {
nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrBuffer);
// FIXME: it's bad thing, but we can't know in advance, which stream(s) where using this generator in practice
hipDeviceSynchronize();
delete buffer;
}
void NativeOps::refreshBuffer(Nd4jPointer *extraPointers, long seed, Nd4jPointer ptrRandom) {
nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrRandom);
unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
hipStreamSynchronize(*stream);
uint64_t *ptrDev = buffer->getDeviceBuffer();
// update rng state
buffer->setSeed(seed);
buffer->setOffset(0);
buffer->propagateToDevice(buffer, *stream);
// refresh the buffer on the host side
nd4j::random::Xoroshiro128 generator(buffer);
generator.refreshBuffer();
// copy back to gpu
hipMemcpyAsync(ptrDev, ptrHost, buffer->getSize() * 8, hipMemcpyHostToDevice, *stream);
}
void NativeOps::reSeedBuffer(Nd4jPointer *extraPointers, long seed, Nd4jPointer ptrRandom) {
nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrRandom);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
hipStreamSynchronize(*stream);
// update rng state
buffer->reSeed(seed);
buffer->setOffset(0);
buffer->propagateToDevice(buffer, *stream);
}
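// note: refreshBuffer (above) regenerates the host-side sequence and copies it back to
// the device, while reSeedBuffer only updates the RNG state in place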
/**
*
* @param npyArray
* @return
*/
Nd4jPointer NativeOps::shapeBufferForNumpy(Nd4jPointer npyArray) {
/*
cnpy::NpyArray *arrPointer = reinterpret_cast<cnpy::NpyArray *>(npyArray);
int *shapeBuffer = shape::shapeBufferOfNpy(*arrPointer);
return reinterpret_cast<Nd4jPointer>(shapeBuffer);
*/
cnpy::NpyArray arr = cnpy::loadNpyFromPointer(reinterpret_cast<char *>(npyArray));
unsigned int *shape = new unsigned int[arr.shape.size()];
for(int i = 0; i < arr.shape.size(); i++) {
shape[i] = arr.shape[i];
}
auto shapeBuffer = shape::shapeBufferOfNpy(arr.shape.size(),
shape,
arr.fortranOrder);
delete[] shape;
return reinterpret_cast<Nd4jPointer>(shapeBuffer);
}
/**
*
* @param npyArray
* @return
*/
Nd4jPointer NativeOps::dataPointForNumpy(Nd4jPointer npyArray) {
char *buff = reinterpret_cast<char *>(npyArray);
//printf("Pointer contents %s\n",buff);
cnpy::NpyArray arr = cnpy::loadNpyFromPointer(reinterpret_cast<char *>(npyArray));
cnpy::NpyArray *arrPointer = &arr;
char *data = arrPointer->data;
if(arrPointer->wordSize == sizeof(float)) {
float *floatData = reinterpret_cast<float *>(data);
return reinterpret_cast<Nd4jPointer>(floatData);
}
else if(arrPointer->wordSize == sizeof(double)) {
double *doubleData = reinterpret_cast<double *>(data);
return reinterpret_cast<Nd4jPointer >(doubleData);
}
return reinterpret_cast<Nd4jPointer >(0);
}
/**
* Load a numpy array from a file
* and return it as an Nd4jPointer
* @param path
* @return
*/
Nd4jPointer NativeOps::numpyFromFile(std::string path) {
/*cnpy::NpyArray arr = cnpy::npyLoad(path);
return reinterpret_cast<Nd4jPointer >(&arr);
*/
char *numpyBuffer = cnpy::loadFile(path.data());
return reinterpret_cast<Nd4jPointer >(numpyBuffer);
}
void NativeOps::releaseNumpy(Nd4jPointer npyArray) {
free(reinterpret_cast<void *>(npyArray));
}
/**
* Return the length of a shape buffer
* based on the pointer
* @param buffer the buffer pointer to check
* @return
*/
int NativeOps::lengthForShapeBufferPointer(Nd4jPointer buffer) {
auto shapeBuffer = reinterpret_cast<Nd4jLong *>(buffer);
return shape::shapeInfoLength(shape::rank(shapeBuffer));
}
/**
* Get the element size for a numpy array
* @param npyArray the numpy array's address
* to get the length for
* @return
*/
int NativeOps::elementSizeForNpyArray(Nd4jPointer npyArray) {
cnpy::NpyArray arr = cnpy::loadNpyFromPointer(reinterpret_cast<char *>(npyArray));
cnpy::NpyArray *arrPointer = &arr;
int size = arrPointer->wordSize;
return size;
/*
cnpy::NpyArray *arr = reinterpret_cast<cnpy::NpyArray *>(npyArray);
return arr->wordSize;
*/
}
/**
* The pointer to get the address for
*
* @param address the address to get the pointer
* @return the pointer for the given address
*/
Nd4jPointer NativeOps::pointerForAddress(Nd4jLong address) {
return reinterpret_cast<Nd4jPointer >(address);
}
void NativeOps::tearDouble(Nd4jPointer *extras, double *x, Nd4jLong *xShapeInfo, Nd4jPointer *targets, Nd4jLong *zShapeInfo, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]);
hipLaunchKernelGGL(( tearKernelDouble), dim3(512), dim3(512), 512, *stream, x, xShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets);
nd4j::DebugHelper::checkErrorCode(stream, "tearDouble(...) failed");
}
void NativeOps::tearFloat(Nd4jPointer *extras, float *x, Nd4jLong *xShapeInfo, Nd4jPointer *targets, Nd4jLong *zShapeInfo, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]);
hipLaunchKernelGGL(( tearKernelFloat), dim3(512), dim3(512), 512, *stream, x, xShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets);
nd4j::DebugHelper::checkErrorCode(stream, "tearFloat(...) failed");
}
void NativeOps::tearHalf(Nd4jPointer *extras, float16 *x, Nd4jLong *xShapeInfo, Nd4jPointer *targets, Nd4jLong *zShapeInfo, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]);
hipLaunchKernelGGL(( tearKernelHalf), dim3(512), dim3(512), 512, *stream, x, xShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets);
nd4j::DebugHelper::checkErrorCode(stream, "tearHalf(...) failed");
}
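// this follows the classic work-efficient parallel prefix scan (the CUDA SDK "scan"
// sample): each block scans a tile of 2*blockSize elements, per-block totals go to
// g_scanBlockSums[level] and are scanned recursively, and uniformAdd folds the scanned
// block sums back into the per-block results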
void prescanArrayRecursive(Nd4jPointer *extras, int *z, int *x, int numElements, int level) {
auto stream = reinterpret_cast<hipStream_t *>(&extras[1]);
auto g_scanBlockSums = reinterpret_cast<int **>(&extras[2]);
int blockSize = 512; // max size of the thread blocks
int numBlocks = nd4j::math::nd4j_max<int>(1, static_cast<int>(ceil(static_cast<float>(numElements) / (2.f * blockSize))));
int numThreads;
if (numBlocks > 1)
numThreads = blockSize;
else if (nd4j::isPowerOfTwo(numElements))
numThreads = numElements / 2;
else
numThreads = nd4j::floorPow2(numElements);
int numEltsPerBlock = numThreads * 2;
// if this is a non-power-of-2 array, the last block will be non-full
// so compute the smallest power of 2 large enough to scan it.
int numEltsLastBlock =
numElements - (numBlocks-1) * numEltsPerBlock;
int numThreadsLastBlock = nd4j::math::nd4j_max<int>(1, numEltsLastBlock / 2);
int np2LastBlock = 0;
int sharedMemLastBlock = 0;
if (numEltsLastBlock != numEltsPerBlock) {
np2LastBlock = 1;
if(!isPowerOfTwo(numEltsLastBlock))
numThreadsLastBlock = floorPow2(numEltsLastBlock);
unsigned int extraSpace = (2 * numThreadsLastBlock) / NUM_BANKS;
sharedMemLastBlock = sizeof(int) * (2 * numThreadsLastBlock + extraSpace);
}
// padding space is used to avoid shared memory bank conflicts
int extraSpace = numEltsPerBlock / NUM_BANKS;
int sharedMemSize = sizeof(int) * (numEltsPerBlock + extraSpace);
// setup execution parameters
// if NP2, we process the last block separately
dim3 grid(max(1, numBlocks - np2LastBlock), 1, 1);
dim3 threads(numThreads, 1, 1);
dim3 gridOnes(1, 1, 1);
dim3 threadsOnes(numThreadsLastBlock, 1, 1);
if (sharedMemSize < 2048)
sharedMemSize = 2048;
if (sharedMemLastBlock < 2048)
sharedMemLastBlock = 2048;
// execute the scan
if (numBlocks > 1) {
nd4j::prescanLauncher<true, false>(grid, threads, sharedMemSize, stream, z, x, g_scanBlockSums[level], numThreads * 2, 0, 0);
if (np2LastBlock) {
nd4j::prescanLauncher<true, true>(gridOnes, threadsOnes, sharedMemLastBlock, stream, z, x, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock);
}
// After scanning all the sub-blocks, we are mostly done. But now we
// need to take all of the last values of the sub-blocks and scan those.
// This will give us a new value that must be added to each block to
// get the final results.
// recursive (CPU) call
prescanArrayRecursive(extras, g_scanBlockSums[level], g_scanBlockSums[level], numBlocks, level+1);
hipLaunchKernelGGL(( nd4j::uniformAdd), dim3(grid), dim3(threads), 1024, *stream, z, g_scanBlockSums[level], numElements - numEltsLastBlock, 0, 0);
if (np2LastBlock) {
hipLaunchKernelGGL(( nd4j::uniformAdd), dim3(1), dim3(numThreadsLastBlock), 1024, *stream, z, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock);
}
} else if (isPowerOfTwo(numElements)) {
nd4j::prescanLauncher<false, false>(grid, threads, sharedMemSize, stream, z, x, 0, numThreads * 2, 0, 0);
} else {
nd4j::prescanLauncher<false, true>(grid, threads, sharedMemSize, stream, z, x, 0, numElements, 0, 0);
}
}
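// threshold encoding appears to run in three phases: P1 marks elements above the
// threshold per block, P2 prefix-scans the per-block counts into write offsets (via
// prescanArrayRecursive above), and P3 compacts the matching entries into the output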
void NativeOps::encodeThresholdP1Float(Nd4jPointer *extras, float *dx, Nd4jLong N, int *dz, float threshold) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]);
int blockSize = 1024;
int numBlocks = N / blockSize + (N % blockSize ? 1 : 0);
hipLaunchKernelGGL(( nd4j::encoderKernelP1Float), dim3(numBlocks), dim3(blockSize) , 1024, *stream, dx, N, dz, threshold);
nd4j::DebugHelper::checkErrorCode(stream, "encodeThresholdP1Float(...) failed");
}
void NativeOps::encodeThresholdP1Double(Nd4jPointer *extras, double *dx, Nd4jLong N, int *dz, float threshold) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]);
int blockSize = 1024;
int numBlocks = N / blockSize + (N % blockSize ? 1 : 0);
hipLaunchKernelGGL(( nd4j::encoderKernelP1Double), dim3(numBlocks), dim3(blockSize) , 1024, *stream, dx, N, dz, threshold);
nd4j::DebugHelper::checkErrorCode(stream, "encodeThresholdP1Double(...) failed");
}
void NativeOps::encodeThresholdP1Half(Nd4jPointer *extras, float16 *dx, Nd4jLong N, int *dz, float threshold) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]);
int blockSize = 1024;
int numBlocks = N / blockSize + (N % blockSize ? 1 : 0);
hipLaunchKernelGGL(( encoderKernelP1Half), dim3(numBlocks), dim3(blockSize) , 1024, *stream, dx, N, dz, threshold);
nd4j::DebugHelper::checkErrorCode(stream, "encodeThresholdP1Half(...) failed");
}
void NativeOps::encodeThresholdP2Int(Nd4jPointer *extraPointers, int *dx, Nd4jLong N, int *dz) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
//encoderKernelP2Float<<<numBlocks, blockSize , 1024 * sizeof(float), *stream>>>(dx, N, dz);
// instead, run a recursive prefix scan over the per-block counters produced in phase 1
prescanArrayRecursive(extraPointers, dz, dx + 1, (int) N, 0);
nd4j::DebugHelper::checkErrorCode(stream, "encodeThresholdP2Int(...) failed");
}
void NativeOps::encodeThresholdP3Float(Nd4jPointer *extraPointers, float *dx, int *offsets, Nd4jLong N, int *dz){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int blockSize = 1024;
int numBlocks = N / blockSize + (N % blockSize ? 1 : 0);
hipLaunchKernelGGL(( nd4j::encoderKernelP3Float), dim3(numBlocks), dim3(blockSize) , 4096, *stream, dx, offsets, N, dz);
nd4j::DebugHelper::checkErrorCode(stream, "encodeThresholdP3Float(...) failed");
}
void NativeOps::encodeThresholdP3Double(Nd4jPointer *extraPointers, double *dx, int *offsets, Nd4jLong N, int *dz){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int blockSize = 1024;
int numBlocks = N / blockSize + (N % blockSize ? 1 : 0);
hipLaunchKernelGGL(( nd4j::encoderKernelP3Double), dim3(numBlocks), dim3(blockSize) , 4096, *stream, dx, offsets, N, dz);
nd4j::DebugHelper::checkErrorCode(stream, "encodeThresholdP3Double(...) failed");
}
void NativeOps::encodeThresholdP3Half(Nd4jPointer *extraPointers, float16 *dx, int *offsets, Nd4jLong N, int *dz){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int blockSize = 1024;
int numBlocks = N / blockSize + (N % blockSize ? 1 : 0);
hipLaunchKernelGGL(( nd4j::encoderKernelP3Half), dim3(numBlocks), dim3(blockSize) , 4096, *stream, dx, offsets, N, dz);
nd4j::DebugHelper::checkErrorCode(stream, "encodeThresholdP3Half(...) failed");
}
void NativeOps::decodeThresholdFloat(Nd4jPointer *extraPointers, void *dx, Nd4jLong N, float *dz){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
// we probably want smaller blocks here; memory writes are misaligned anyway
int blockSize = 128;
int numBlocks = N / blockSize + (N % blockSize ? 1 : 0);
hipLaunchKernelGGL(( nd4j::decoderKernelFloat), dim3(numBlocks), dim3(blockSize) , 1024, *stream, dx, N, dz);
nd4j::DebugHelper::checkErrorCode(stream, "decodeThresholdFloat(...) failed");
}
void NativeOps::decodeThresholdDouble(Nd4jPointer *extraPointers, void *dx, Nd4jLong N, double *dz){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
// we probably want smaller blocks here; memory writes are misaligned anyway
int blockSize = 128;
int numBlocks = N / blockSize + (N % blockSize ? 1 : 0);
hipLaunchKernelGGL(( nd4j::decoderKernelDouble), dim3(numBlocks), dim3(blockSize) , 1024, *stream, dx, N, dz);
nd4j::DebugHelper::checkErrorCode(stream, "decodeThresholdDouble(...) failed");
}
void NativeOps::decodeThresholdHalf(Nd4jPointer *extraPointers, void *dx, Nd4jLong N, float16 *dz){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
// we probably want smaller blocks here; memory writes are misaligned anyway
int blockSize = 128;
int numBlocks = N / blockSize + (N % blockSize ? 1 : 0);
hipLaunchKernelGGL(( nd4j::decoderKernelHalf), dim3(numBlocks), dim3(blockSize) , 1024, *stream, dx, N, dz);
nd4j::DebugHelper::checkErrorCode(stream, "decodeThresholdHalf(...) failed");
}
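// the reduce3All variants evaluate a pairwise reduction (a distance-style op) between
// every TAD of x and every TAD of y along the given dimensions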
void NativeOps::execReduce3AllDouble(Nd4jPointer *extraPointers,
int opNum,
double *x,
Nd4jLong *xInfo,
double *extraParamsVals,
double *y,
Nd4jLong *yInfo,
double *result,
Nd4jLong *resultShapeInfoBuffer,
int *dimension,
int dimensionLength,
Nd4jLong *xTadShapeInfo,
Nd4jLong *xOffsets,
Nd4jLong *yTadShapeInfo,
Nd4jLong *yOffsets) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D119 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[7], dimensionLength, sizeof(double), 2);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AD119 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( reduce3AllDouble), dim3(launchDims.x), dim3(512), (512 * 8 * 2 + 512), *stream,
opNum,
x,
xInfo,
y,
yInfo,
extraParamsVals,
result,
resultShapeInfoBuffer,
dimension,
dimensionLength,
1, allocationPointer, xTadShapeInfo, xOffsets, yTadShapeInfo, yOffsets);
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::execReduce3AllFloat(Nd4jPointer *extraPointers,
int opNum,
float *x,
Nd4jLong *xInfo,
float *extraParamsVals,
float *y,
Nd4jLong *yInfo,
float *result,
Nd4jLong *resultShapeInfoBuffer,
int *dimension,
int dimensionLength,
Nd4jLong *xTadShapeInfo,
Nd4jLong *xOffsets,
Nd4jLong *yTadShapeInfo,
Nd4jLong *yOffsets) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F119 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[7], dimensionLength, sizeof(float), 2);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF119 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( reduce3AllFloat), dim3(launchDims.x), dim3(512), (512 * 4 * 2 + 512), *stream,
opNum,
x,
xInfo,
y,
yInfo,
extraParamsVals,
result,
resultShapeInfoBuffer,
dimension,
dimensionLength,
1, allocationPointer, xTadShapeInfo, xOffsets, yTadShapeInfo, yOffsets);
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::execReduce3AllHalf(Nd4jPointer *extraPointers,
int opNum,
float16 *x,
Nd4jLong *xInfo,
float16 *extraParamsVals,
float16 *y,
Nd4jLong *yInfo,
float16 *result,
Nd4jLong *resultShapeInfoBuffer,
int *dimension,
int dimensionLength,
Nd4jLong *xTadShapeInfo,
Nd4jLong *xOffsets,
Nd4jLong *yTadShapeInfo,
Nd4jLong *yOffsets) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H119 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[7], dimensionLength, sizeof(float16), 2);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AH119 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( reduce3AllHalf), dim3(launchDims.x), dim3(512), (512 * 2 * 2 + 512), *stream,
opNum,
x,
xInfo,
y,
yInfo,
extraParamsVals,
result,
resultShapeInfoBuffer,
dimension,
dimensionLength,
1, allocationPointer, xTadShapeInfo, xOffsets, yTadShapeInfo, yOffsets);
DEBUG_KERNEL(stream, opNum);
}
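// sort dispatch: power-of-two lengths (up to 10M elements) take the plain bitonic
// network; on GCC builds, larger contiguous (EWS == 1) arrays fall back to b40c radix
// sort, with a reverse transform afterwards for descending order; everything else goes
// through the padded bitonic-style path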
void NativeOps::sortFloat(Nd4jPointer *extraPointers, float *x, Nd4jLong *xShapeInfo, bool descending) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[ 1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto xLength = shape::length(hostXShapeInfo);
auto xEWS = shape::elementWiseStride(hostXShapeInfo);
// check if xLength is a power of 2; if so, use bitonic sort
if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) {
int numThreads = nd4j::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
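// k is the size of the bitonic subsequences being merged, j the compare-exchange distance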
for (int k = 2; k <= xLength; k = 2*k) {
for (int j = k >> 1; j > 0; j = j >> 1) {
hipLaunchKernelGGL(( cudaBitonicSortFloat), dim3(numBlocks), dim3(numThreads), 512, *stream, x, xShapeInfo, j, k, xLength, descending);
}
}
} else {
#ifdef __clang__
if (1 > 0) {
#elif __GNUC__
if ((xLength > 1024 * 1024 * 10) && xEWS == 1) {
b40c::radix_sort::Enactor enactor;
b40c::util::DoubleBuffer<float> sort_storage(x);
enactor.Sort(sort_storage, xLength);
// fire reverse op
if (descending)
execTransformFloat(extraPointers, 70, x, xShapeInfo, x, xShapeInfo, nullptr);
} else {
#else
if (1 > 0) {
#endif
int numThreads = nd4j::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
numBlocks = nd4j::math::nd4j_min<int>(512, numBlocks);
int max = 2, dg = 0;
while (max < xLength) {
max <<= 1;
dg++;
}
max <<= 1;
for (int window = 2; window < max; window<<=1) {
int n = window;
int rev = 0;
do{
int half = n >> 1;
hipLaunchKernelGGL(( cudaSortFloat), dim3(numBlocks), dim3(numThreads), numThreads * 2 * sizeof(float), *stream, x, xShapeInfo, n, xLength, rev, descending);
n>>=1;
rev = 1;
} while(n > 1);
}
}
}
nd4j::DebugHelper::checkErrorCode(stream, "sortFloat(...) failed");
}
void NativeOps::sortDouble(Nd4jPointer *extraPointers, double *x, Nd4jLong *xShapeInfo, bool descending) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto xLength = shape::length(hostXShapeInfo);
auto xEWS = shape::elementWiseStride(hostXShapeInfo);
// check if xLength is a power of 2; if so, use bitonic sort
if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) {
int numThreads = nd4j::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
for (int k = 2; k <= xLength; k = 2*k) {
for (int j = k >> 1; j > 0; j = j >> 1) {
hipLaunchKernelGGL(( cudaBitonicSortDouble), dim3(numBlocks), dim3(numThreads), 512, *stream, x, xShapeInfo, j, k, xLength, descending);
}
}
} else {
#ifdef __clang__
if (1 > 0) {
#elif __GNUC__
if ((xLength > 1024 * 1024 * 10) && xEWS == 1) {
b40c::radix_sort::Enactor enactor;
b40c::util::DoubleBuffer<double> sort_storage(x);
enactor.Sort(sort_storage, xLength);
// fire reverse op
if (descending)
execTransformDouble(extraPointers, 70, x, xShapeInfo, x, xShapeInfo, nullptr);
} else {
#else
if ( 1 > 0) {
#endif
int numThreads = nd4j::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
numBlocks = nd4j::math::nd4j_min<int>(512, numBlocks);
int max = 2, dg = 0;
while (max < xLength) {
max <<= 1;
dg++;
}
max <<= 1;
for (int window = 2; window < max; window<<=1) {
int n = window;
int rev = 0;
do{
int half = n >> 1;
hipLaunchKernelGGL(( cudaSortDouble), dim3(numBlocks), dim3(numThreads), numThreads * 2 * sizeof(double), *stream, x, xShapeInfo, n, xLength, rev, descending);
n>>=1;
rev = 1;
} while(n > 1);
}
}
}
nd4j::DebugHelper::checkErrorCode(stream, "sortDouble(...) failed");
}
void NativeOps::sortHalf(Nd4jPointer *extraPointers, float16 *x, Nd4jLong *xShapeInfo, bool descending) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
int xLength = shape::length(hostXShapeInfo);
// check if xLength is a power of 2; if so, use bitonic sort
if ((xLength != 0) && ((xLength & (xLength - 1)) == 0)) {
int numThreads = nd4j::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
for (int k = 2; k <= xLength; k = 2*k) {
for (int j = k >> 1; j > 0; j = j >> 1) {
hipLaunchKernelGGL(( cudaBitonicSortHalf), dim3(numBlocks), dim3(numThreads), 512, *stream, x, xShapeInfo, j, k, xLength, descending);
}
}
} else {
// half is incompatible with radix, so only bitonic here
int numThreads = nd4j::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
numBlocks = nd4j::math::nd4j_min<int>(512, numBlocks);
int max = 2, dg = 0;
while (max < xLength) {
max <<= 1;
dg++;
}
max <<= 1;
for (int window = 2; window < max; window<<=1) {
int n = window;
int rev = 0;
do{
int half = n >> 1;
hipLaunchKernelGGL(( cudaSortHalf), dim3(numBlocks), dim3(numThreads), numThreads * 2 * sizeof(float16), *stream, x, xShapeInfo, n, xLength, rev, descending);
n>>=1;
rev = 1;
} while(n > 1);
}
}
nd4j::DebugHelper::checkErrorCode(stream, "sortHalf(...) failed");
}
void NativeOps::sortTadFloat(Nd4jPointer *extraPointers, float *x, Nd4jLong *xShapeInfo, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, bool descending) {
// sorts each TAD along the given dimension independently
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
hipLaunchKernelGGL(( cudaSortTadFloat), dim3(512), dim3(512), 1088 * sizeof(float), *stream, x, xShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, descending);
nd4j::DebugHelper::checkErrorCode(stream, "sortTadFloat(...) failed");
}
void NativeOps::sortTadHalf(Nd4jPointer *extraPointers, float16 *x, Nd4jLong *xShapeInfo, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, bool descending) {
// sorts each TAD along the given dimension independently
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
hipLaunchKernelGGL(( cudaSortTadHalf), dim3(512), dim3(512), 1088 * sizeof(float16), *stream, x, xShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, descending);
nd4j::DebugHelper::checkErrorCode(stream, "sortTadHalf(...) failed");
}
void NativeOps::sortTadDouble(Nd4jPointer *extraPointers, double *x, Nd4jLong *xShapeInfo, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, bool descending) {
// sorts each TAD along the given dimension independently
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
hipLaunchKernelGGL(( cudaSortTadDouble), dim3(512), dim3(512), 1088 * sizeof(double), *stream, x, xShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, descending);
nd4j::DebugHelper::checkErrorCode(stream, "sortTadDouble(...) failed");
}
void NativeOps::sortCooIndicesFloat(Nd4jPointer *extraPointers, Nd4jLong *indices, float *values, Nd4jLong length, int rank) {
throw std::runtime_error("Not implemented yet");
}
void NativeOps::sortCooIndicesDouble(Nd4jPointer *extraPointers, Nd4jLong *indices, double *values, Nd4jLong length, int rank) {
throw std::runtime_error("Not implemented yet");
}
void NativeOps::sortCooIndicesHalf(Nd4jPointer *extraPointers, Nd4jLong *indices, float16 *values, Nd4jLong length, int rank) {
throw std::runtime_error("Not implemented yet");
}
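// bitmap encoding: the kernel packs threshold hits into the int bitmap dz; the number of
// encoded elements lands in resultPointer[0], which is read back and reset below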
Nd4jLong NativeOps::encodeBitmapFloat(Nd4jPointer *extraPointers, float *dx, Nd4jLong N, int *dz, float threshold) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto *hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
int *resultPointer = reinterpret_cast<int *>(extraPointers[2]);
int *reductionPointer = reinterpret_cast<int *>(extraPointers[3]);
hipLaunchKernelGGL(( cudaEncodeBitmapFloat), dim3(512), dim3(512), 512 * 2 * sizeof(float) + 384, *stream, dx, N, dz, resultPointer, reductionPointer, threshold);
nd4j::DebugHelper::checkErrorCode(stream, "encodeBitmapFloat(...) failed");
Nd4jLong result = (Nd4jLong) resultPointer[0];
resultPointer[0] = 0;
return result;
}
Nd4jLong NativeOps::encodeBitmapDouble(Nd4jPointer *extraPointers, double *dx, Nd4jLong N, int *dz, float threshold) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
int *resultPointer = reinterpret_cast<int *>(extraPointers[2]);
int *reductionPointer = reinterpret_cast<int *>(extraPointers[3]);
hipLaunchKernelGGL(( cudaEncodeBitmapDouble), dim3(512), dim3(512), 512 * 2 * sizeof(double) + 384, *stream, dx, N, dz, resultPointer, reductionPointer, threshold);
nd4j::DebugHelper::checkErrorCode(stream, "encodeBitmapDouble(...) failed");
Nd4jLong result = (Nd4jLong) resultPointer[0];
resultPointer[0] = 0;
return result;
}
Nd4jLong NativeOps::encodeBitmapHalf(Nd4jPointer *extraPointers, float16 *dx, Nd4jLong N, int *dz, float threshold) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
int *resultPointer = reinterpret_cast<int *>(extraPointers[2]);
int *reductionPointer = reinterpret_cast<int *>(extraPointers[3]);
hipLaunchKernelGGL(( cudaEncodeBitmapHalf), dim3(512), dim3(512), (512 * sizeof(float16)) + (512 * sizeof(int)) + 384, *stream, dx, N, dz, resultPointer, reductionPointer, threshold);
nd4j::DebugHelper::checkErrorCode(stream, "encodeBitmapHalf(...) failed");
Nd4jLong result = (Nd4jLong) resultPointer[0];
resultPointer[0] = 0;
return result;
}
void NativeOps::decodeBitmapFloat(Nd4jPointer *extraPointers, void *dx, Nd4jLong N, float *dz) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
hipLaunchKernelGGL(( cudaDecodeBitmapFloat), dim3(512), dim3(512), 512 * sizeof(float) + 384, *stream, dx, N, dz);
nd4j::DebugHelper::checkErrorCode(stream, "decodeBitmapFloat(...) failed");
}
void NativeOps::decodeBitmapDouble(Nd4jPointer *extraPointers, void *dx, Nd4jLong N, double *dz) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
hipLaunchKernelGGL(( cudaDecodeBitmapDouble), dim3(512), dim3(512), 512 * sizeof(double) + 384, *stream, dx, N, dz);
nd4j::DebugHelper::checkErrorCode(stream, "decodeBitmapDouble(...) failed");
}
void NativeOps::decodeBitmapHalf(Nd4jPointer *extraPointers, void *dx, Nd4jLong N, float16 *dz) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
hipLaunchKernelGGL(( cudaDecodeBitmapHalf), dim3(512), dim3(512), 512 * sizeof(float16) + 384, *stream, dx, N, dz);
nd4j::DebugHelper::checkErrorCode(stream, "decodeBitmapHalf(...) failed");
}
Nd4jLong* NativeOps::mmapFile(Nd4jPointer *extraPointers, const char *fileName, Nd4jLong length) {
return nullptr;
}
void NativeOps::munmapFile(Nd4jPointer *extraPointers, Nd4jLong* ptrMap, Nd4jLong length) {
}
Nd4jPointer NativeOps::executeProtoGraphFloat(Nd4jPointer *extraPointers, Nd4jPointer protoBufferPointer) {
return nullptr;
}
Nd4jPointer NativeOps::executeProtoGraphFloat(Nd4jPointer *extraPointers, const char *fileName) {
return nullptr;
}
nd4j::graph::ResultWrapper* NativeOps::executeFlatGraphFloat(Nd4jPointer *extraPointers, Nd4jPointer flatBufferPointer) {
return nullptr;
}
nd4j::graph::ResultWrapper* NativeOps::executeFlatGraphHalf(Nd4jPointer *extraPointers, Nd4jPointer flatBufferPointer) {
return nullptr;
}
nd4j::graph::ResultWrapper* NativeOps::executeFlatGraphDouble(Nd4jPointer *extraPointers, Nd4jPointer flatBufferPointer) {
return nullptr;
}
const char* NativeOps::getAllCustomOps() {
return nd4j::ops::OpRegistrator::getInstance()->getAllCustomOperations();
}
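// shape-inference helper: builds a throwaway VariableSpace/Context around the raw input
// buffers (no ownership taken) and delegates to the op's calculateOutputShape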
template<typename T>
nd4j::ShapeList* _calculateOutputShapes(Nd4jPointer* extraPointers, nd4j::ops::DeclarableOp<T>* op, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, T* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) {
nd4j::graph::VariableSpace<T> varSpace;
Context<T> block(2, &varSpace);
nd4j::ShapeList inShapes;
for (int e = 0; e < numIArgs; e++)
block.getIArguments()->push_back(iArgs[e]);
for (int e = 0; e < numTArgs; e++)
block.getTArguments()->push_back(tArgs[e]);
for (int e = 0; e < numInputShapes; e++) {
auto shape_ = reinterpret_cast<Nd4jLong *>(inputShapes[e]);
auto buffer_ = reinterpret_cast<T *>(inputBuffers[e]);
auto array = new nd4j::NDArray<T>(buffer_, shape_);
array->triggerAllocationFlag(false, false);
// the block should reference the proper variables
varSpace.putVariable(1, e, array);
block.pickInput(1, e);
inShapes.push_back(shape_);
}
auto shapeList = op->calculateOutputShape(&inShapes, block);
if (varSpace.workspace() != nullptr)
shapeList->detach();
return shapeList;
}
nd4j::ShapeList* NativeOps::calculateOutputShapesFloat(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, float* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) {
auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationFloat(hash);
return _calculateOutputShapes<float>(extraPointers, op, inputBuffers, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs);
}
nd4j::ShapeList* NativeOps::calculateOutputShapesHalf(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, float16* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) {
auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationHalf(hash);
return _calculateOutputShapes<float16>(extraPointers, op, inputBuffers, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs);
}
nd4j::ShapeList* NativeOps::calculateOutputShapesDouble(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) {
auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationDouble(hash);
return _calculateOutputShapes<double>(extraPointers, op, inputBuffers, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs);
}
template<typename T>
nd4j::ShapeList* _calculateOutputShapes(Nd4jPointer* extraPointers, nd4j::ops::DeclarableOp<T>* op, Nd4jPointer* inputShapes, int numInputShapes, T* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) {
nd4j::graph::Context<T> block(1);
nd4j::ShapeList inShapes;
for (int e = 0; e < numIArgs; e++)
block.getIArguments()->push_back(iArgs[e]);
for (int e = 0; e < numTArgs; e++)
block.getTArguments()->push_back(tArgs[e]);
for (int e = 0; e < numInputShapes; e++)
inShapes.push_back(static_cast<Nd4jLong *>(inputShapes[e]));
auto shapeList = op->calculateOutputShape(&inShapes, block);
return shapeList;
}
nd4j::ShapeList* NativeOps::calculateOutputShapesFloat(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputShapes, int numInputShapes, float* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) {
auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationFloat(hash);
return _calculateOutputShapes<float>(extraPointers, op, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs);
}
nd4j::ShapeList* NativeOps::calculateOutputShapesHalf(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputShapes, int numInputShapes, float16* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) {
auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationHalf(hash);
return _calculateOutputShapes<float16>(extraPointers, op, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs);
}
nd4j::ShapeList* NativeOps::calculateOutputShapesDouble(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) {
auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationDouble(hash);
return _calculateOutputShapes<double>(extraPointers, op, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs);
}
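// realExec wraps the caller-provided buffers into NDArrays (buffer ownership stays with
// the caller), executes the op, and streamlines any output whose ordering differs from
// the one requested by its output shape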
template<typename T>
static FORCEINLINE Nd4jStatus realExec(nd4j::ops::DeclarableOp<T>* op, Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, T* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool isInplace) {
if (op == nullptr)
nd4j_printf("Can't find requested operation: [%lld]\n", hash);
// we're using the same fake nodeId everywhere here
std::vector<nd4j::NDArray<T>*> inputs(numInputs);
std::vector<nd4j::NDArray<T>*> outputs(numOutputs);
std::vector<T> ttArgs(numTArgs);
std::vector<Nd4jLong> iiArgs(numIArgs);
// filling block now with inputs
for (int e = 0; e < numInputs; e++) {
auto buffer = reinterpret_cast<T *>(inputBuffers[e]);
auto shape = reinterpret_cast<Nd4jLong *>(inputShapes[e]);
inputs[e] = new nd4j::NDArray<T>(buffer, shape);
}
// if not inplace, transfer the output arrays
if (!isInplace)
for (int e = 0; e < numOutputs; e++) {
auto buffer = reinterpret_cast<T *>(outputBuffers[e]);
// we want to keep original output shape intact
auto shape = shape::copyShape(reinterpret_cast<Nd4jLong *>(outputShapes[e]));
auto array = new nd4j::NDArray<T>(buffer, shape);
outputs[e] = array;
// and we want to release shape copy once we're done
array->triggerAllocationFlag(false, true);
}
for (int e = 0; e < numIArgs; e++)
iiArgs[e] = iArgs[e];
for (int e = 0; e < numTArgs; e++)
ttArgs[e] = tArgs[e];
// at this point everything should be filled in
auto result = op->execute(inputs, outputs, ttArgs, iiArgs, isInplace);
//auto result = op->execute(inputs, ttArgs, iiArgs, isInplace);
if (!isInplace)
for (int e = 0; e < numOutputs; e++) {
//shape::printShapeInfoLinear("JVM output shape", (int *) outputShapes[e]);
//shape::printShapeInfoLinear("C++ output shape", (int *) outputs[e]->shapeInfo());
//outputs[e]->printIndexedBuffer("C++ raw output");
//outputs[e]->printBuffer("C++ indexed output");
if (outputs[e]->ordering() != shape::order(reinterpret_cast<Nd4jLong *>(outputShapes[e])))
outputs[e]->streamline(shape::order(reinterpret_cast<Nd4jLong *>(outputShapes[e])));
}
/*
if (!isInplace) {
if (result->size() != numOutputs) {
return ND4J_STATUS_BAD_OUTPUT;
}
for (int e = 0; e < numOutputs; e++) {
auto buffer = (T *) outputBuffers[e];
auto shape = (int *) outputShapes[e];
nd4j::NDArray<T> tmp(buffer, shape);
if (tmp.lengthOf() != result->at(e)->lengthOf()) {
nd4j_printf("Provided output array for [%s] has length of %i, but actual result has length of %i\n", op->getOpName()->c_str(), tmp.lengthOf(), result->at(e)->lengthOf());
return ND4J_STATUS_BAD_OUTPUT;
}
tmp.assign(result->at(e));
}
} else {
// if op is inplace, our ResultSet holds pointers
result->purge();
}
delete result;
*/
for (auto v: inputs)
delete v;
for (auto v: outputs)
delete v;
return Status::OK();
}
int NativeOps::execCustomOpFloat(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, float* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool isInplace) {
auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationFloat(hash);
return realExec<float>(op, extraPointers, hash, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs, tArgs, numTArgs, iArgs, numIArgs, isInplace);
}
int NativeOps::execCustomOpDouble(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool isInplace) {
auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationDouble(hash);
return realExec<double>(op, extraPointers, hash, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs, tArgs, numTArgs, iArgs, numIArgs, isInplace);
}
int NativeOps::execCustomOpHalf(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, float16* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool isInplace) {
auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationHalf(hash);
return realExec<float16>(op, extraPointers, hash, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs, tArgs, numTArgs, iArgs, numIArgs, isInplace);
}
int NativeOps::registerGraphFloat(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer flatBufferPointer) {
auto graph = nd4j::graph::GraphExecutioner<float>::importFromFlatPointer(flatBufferPointer);
nd4j::graph::GraphHolder::getInstance()->registerGraph(graphId, graph);
return ND4J_STATUS_OK;
}
int NativeOps::registerGraphDouble(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer flatBufferPointer) {
auto graph = nd4j::graph::GraphExecutioner<double>::importFromFlatPointer(flatBufferPointer);
nd4j::graph::GraphHolder::getInstance()->registerGraph(graphId, graph);
return ND4J_STATUS_OK;
}
int NativeOps::registerGraphHalf(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer flatBufferPointer) {
auto graph = nd4j::graph::GraphExecutioner<float16>::importFromFlatPointer(flatBufferPointer);
nd4j::graph::GraphHolder::getInstance()->registerGraph(graphId, graph);
return ND4J_STATUS_OK;
}
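// stored-graph execution: clone the graph's VariableSpace, bind the provided inputs to
// the requested variable indices, execute the graph, then clone the requested outputs
// into a caller-owned VariablesSet; the cloned space (and the arrays bound into it) is
// deleted before returning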
template <typename T>
static VariablesSet<T>* executeStoredGraphT(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) {
auto graph = nd4j::graph::GraphHolder::getInstance()->pullGraph<T>(graphId);
auto varSpace = graph->getVariableSpace()->clone();
std::vector<nd4j::NDArray<T> *> handles;
for (int e = 0; e < numInputs; e++) {
auto idx = inputIndices[e];
// we'll delete this array later, together with cloned VariableSpace
auto array = new nd4j::NDArray<T>(reinterpret_cast<T *>(inputBuffers[e]), reinterpret_cast<Nd4jLong *>(inputShapes[e]));
handles.emplace_back(array);
if (varSpace->hasVariable(idx)) {
auto var = varSpace->getVariable(idx);
if (var->hasNDArray())
delete var->getNDArray();
var->setNDArray(array);
} else
varSpace->putVariable(idx, array);
}
auto result = nd4j::graph::GraphExecutioner<T>::execute(graph, varSpace);
auto varSet = new nd4j::graph::VariablesSet<T>(result);
if (result == ND4J_STATUS_OK) {
// pull back results, and provide them
auto outputs = graph->fetchOutputs();
for (int e = 0; e < outputs->size(); e++) {
// we only take the variable ID/Index from the original graph; values will be taken from the cloned workspace
std::pair<int, int> varId(outputs->at(e)->id(), outputs->at(e)->index());
auto var = varSpace->getVariable(varId);
varSet->push_back(var->clone());
}
delete outputs;
}
delete varSpace;
return varSet;
}
VariablesSet<float>* NativeOps::executeStoredGraphFloat(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) {
return executeStoredGraphT<float>(extraPointers, graphId, inputBuffers, inputShapes, inputIndices, numInputs);
}
VariablesSet<float16>* NativeOps::executeStoredGraphHalf(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) {
return executeStoredGraphT<float16>(extraPointers, graphId, inputBuffers, inputShapes, inputIndices, numInputs);
}
VariablesSet<double>* NativeOps::executeStoredGraphDouble(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) {
return executeStoredGraphT<double>(extraPointers, graphId, inputBuffers, inputShapes, inputIndices, numInputs);
}
int NativeOps::unregisterGraph(Nd4jPointer *extraPointers, Nd4jLong graphId) {
nd4j::graph::GraphHolder::getInstance()->dropGraphAny(graphId);
return ND4J_STATUS_OK;
}
void NativeOps::deletePointerArray(Nd4jPointer pointer) {
Nd4jPointer *ptr = reinterpret_cast<Nd4jPointer *>(pointer);
delete[] ptr;
}
void NativeOps::deleteIntArray(Nd4jPointer pointer) {
auto ptr = reinterpret_cast<int *>(pointer);
delete[] ptr;
}
void NativeOps::deleteLongArray(Nd4jPointer pointer) {
auto ptr = reinterpret_cast<Nd4jLong *>(pointer);
delete[] ptr;
}
template <typename T>
static void deleteVariablesSetT(Nd4jPointer pointer) {
nd4j::graph::VariablesSet<T>* ptr = reinterpret_cast<nd4j::graph::VariablesSet<T>*>(pointer);
delete ptr;
}
void NativeOps::deleteVariablesSetFloat(Nd4jPointer pointer) {
deleteVariablesSetT<float>(pointer);
}
void NativeOps::deleteVariablesSetHalf(Nd4jPointer pointer) {
deleteVariablesSetT<float16>(pointer);
}
void NativeOps::deleteVariablesSetDouble(Nd4jPointer pointer) {
deleteVariablesSetT<double>(pointer);
}
void NativeOps::deleteShapeList(Nd4jPointer shapeList) {
nd4j::ShapeList* list = reinterpret_cast<nd4j::ShapeList*>(shapeList);
list->destroy();
delete list;
}
const char* NativeOps::getAllOperations() {
return nd4j::OpTracker::getInstance()->exportOperations();
}
Nd4jPointer NativeOps::getGraphStateHalf(Nd4jLong id) {
return (Nd4jPointer) new nd4j::graph::GraphState<float16>(id);
}
Nd4jPointer NativeOps::getGraphStateFloat(Nd4jLong id) {
return (Nd4jPointer) new nd4j::graph::GraphState<float>(id);
}
Nd4jPointer NativeOps::getGraphStateDouble(Nd4jLong id) {
return (Nd4jPointer) new nd4j::graph::GraphState<double>(id);
}
void NativeOps::deleteGraphStateHalf(Nd4jPointer state) {
auto stateP = reinterpret_cast<nd4j::graph::GraphState<float16> *>(state);
delete stateP;
}
void NativeOps::deleteGraphStateFloat(Nd4jPointer state) {
auto stateP = reinterpret_cast<nd4j::graph::GraphState<float> *>(state);
delete stateP;
}
void NativeOps::deleteGraphStateDouble(Nd4jPointer state) {
auto stateP = reinterpret_cast<nd4j::graph::GraphState<double> *>(state);
delete stateP;
}
template <typename T>
Nd4jStatus execCustomOpWithScope(Nd4jPointer *extraPointers, nd4j::graph::GraphState<T> *state, Nd4jLong opHash, Nd4jLong *scopes, int numScopes, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int numInputs, Nd4jPointer *outputBuffers, Nd4jPointer *outputShapes, int numOutputs) {
/**
* That's basically exec, with VariableSpace provided in GraphState:
* depending on operation (i.e. while of if), different logic executors could be used
*/
auto graph = state->graph();
auto varSpace = state->variableSpace();
// Node is dynamically created, and has nothing beyond it: only inputs and outputs
// this node has an id of 0, and its inputs are mapped from the provided buffers below
nd4j::graph::Node<T> node(OpType_LOGIC, opHash, 0);
// mapping inputs
for (int e = 0; e < numInputs; e++) {
auto buffer = reinterpret_cast<T *>(inputBuffers[e]);
auto shapeInfo = reinterpret_cast<Nd4jLong *>(inputShapes[e]);
auto array = new nd4j::NDArray<T>(buffer, shapeInfo, varSpace->workspace());
// now we just put array to VarSpace
varSpace->putVariable(0, e, array);
node.pickInput(0, e);
}
// mapping scopes
for (int e = 0; e < numScopes; e++) {
// we should check scope existence in GraphState/Graph
int scopeId = (int) scopes[e];
if (!state->hasScope(scopeId)) {
nd4j_printf("execCustomOpWithScope: referenced scope [%i] doesn't exist\n", scopeId);
return Status::THROW();
}
node.pickInput(scopeId, 0);
}
auto result = LogicExecutor<T>::processNode(graph, &node);
if (result != Status::OK())
return result;
// mapping outputs
for (int e = 0; e < numOutputs; e++) {
auto buffer = reinterpret_cast<T *>(outputBuffers[e]);
auto shapeInfo = reinterpret_cast<Nd4jLong *>(outputShapes[e]);
nd4j::NDArray<T> array(buffer, shapeInfo, varSpace->workspace());
// now we just put array to VarSpace to the same ID
//varSpace->putVariable(0, e, array);
auto t = varSpace->getVariable(0, e)->getNDArray();
array.assign(t);
}
// removing input variables
for (int e = 0; e < numInputs; e++) {
varSpace->dropVariable(0, e);
}
// at this point the Graph and Node for the current op have been executed
return Status::OK();
}
Nd4jStatus NativeOps::execCustomOpWithScopeHalf(Nd4jPointer *extraPointers, Nd4jPointer state, Nd4jLong opHash, Nd4jLong *scopes, int numScopes, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int numInputs, Nd4jPointer *outputBuffers, Nd4jPointer *outputShapes, int numOutputs) {
return execCustomOpWithScope<float16>(extraPointers, reinterpret_cast<nd4j::graph::GraphState<float16> *>(state), opHash, scopes, numScopes, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs);
}
Nd4jStatus NativeOps::execCustomOpWithScopeFloat(Nd4jPointer *extraPointers, Nd4jPointer state, Nd4jLong opHash, Nd4jLong *scopes, int numScopes, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int numInputs, Nd4jPointer *outputBuffers, Nd4jPointer *outputShapes, int numOutputs) {
return execCustomOpWithScope<float>(extraPointers, reinterpret_cast<nd4j::graph::GraphState<float> *>(state), opHash, scopes, numScopes, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs);
}
Nd4jStatus NativeOps::execCustomOpWithScopeDouble(Nd4jPointer *extraPointers, Nd4jPointer state, Nd4jLong opHash, Nd4jLong *scopes, int numScopes, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int numInputs, Nd4jPointer *outputBuffers, Nd4jPointer *outputShapes, int numOutputs) {
return execCustomOpWithScope<double>(extraPointers, reinterpret_cast<nd4j::graph::GraphState<double> *>(state), opHash, scopes, numScopes, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs);
}
void NativeOps::deleteResultWrapper(Nd4jPointer ptr) {
// cast to the concrete type so delete invokes the proper destructor
auto p = reinterpret_cast<nd4j::graph::ResultWrapper *>(ptr);
delete p;
}
/*
* TypeDef:
* void convertTypes(Nd4jPointer *extras, int srcType, Nd4jPointer x, long N, int dstType, Nd4jPointer z);
*/
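// dense srcType x dstType dispatch below; ND4J_FLOAT24 branches are placeholders,
// same-type conversions are no-ops, and the THRESHOLD (de)compression paths are
// currently commented out.
// hypothetical usage: convertTypes(extras, ND4J_FLOAT32, xPtr, n, ND4J_FLOAT16, zPtr);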
void NativeOps::convertTypes(Nd4jPointer *extras, int srcType, Nd4jPointer x, Nd4jLong N, int dstType, Nd4jPointer z) {
auto dx = reinterpret_cast<void *>(x);
auto dz = reinterpret_cast<void *>(z);
if (srcType == ND4J_FLOAT8) {
if (dstType == ND4J_FLOAT8) {
// convertKernel<double, nd4j::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
nd4j::TypeCast::convertGeneric<nd4j::float8, nd4j::int8>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
nd4j::TypeCast::convertGeneric<nd4j::float8, nd4j::uint8>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
nd4j::TypeCast::convertGeneric<nd4j::float8, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
nd4j::TypeCast::convertGeneric<nd4j::float8, nd4j::int16>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
nd4j::TypeCast::convertGeneric<nd4j::float8, nd4j::uint16>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
} else if (dstType == ND4J_FLOAT32) {
nd4j::TypeCast::convertGeneric<nd4j::float8, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
nd4j::TypeCast::convertGeneric<nd4j::float8, double>(extras, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_INT8) {
if (dstType == ND4J_FLOAT8) {
nd4j::TypeCast::convertGeneric<nd4j::int8, nd4j::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
//convertKernel<nd4j::int8, nd4j::int8>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
nd4j::TypeCast::convertGeneric<nd4j::int8, nd4j::uint8>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
nd4j::TypeCast::convertGeneric<nd4j::int8, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
nd4j::TypeCast::convertGeneric<nd4j::int8, nd4j::int16>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
nd4j::TypeCast::convertGeneric<nd4j::int8, nd4j::uint16>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
// TODO: eventually we might want to add it
} else if (dstType == ND4J_FLOAT32) {
nd4j::TypeCast::convertGeneric<nd4j::int8, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
nd4j::TypeCast::convertGeneric<nd4j::int8, double>(extras, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_UINT8) {
if (dstType == ND4J_FLOAT8) {
nd4j::TypeCast::convertGeneric<nd4j::uint8, nd4j::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
nd4j::TypeCast::convertGeneric<nd4j::uint8, nd4j::int8>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
nd4j::TypeCast::convertGeneric<nd4j::uint8, nd4j::uint8>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
nd4j::TypeCast::convertGeneric<nd4j::uint8, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
nd4j::TypeCast::convertGeneric<nd4j::uint8, nd4j::int16>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
nd4j::TypeCast::convertGeneric<nd4j::uint8, nd4j::uint16>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
// TODO: still might want to add
} else if (dstType == ND4J_FLOAT32) {
nd4j::TypeCast::convertGeneric<nd4j::uint8, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
nd4j::TypeCast::convertGeneric<nd4j::uint8, double>(extras, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_FLOAT16) {
if (dstType == ND4J_FLOAT8) {
nd4j::TypeCast::convertGeneric<float16, nd4j::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
nd4j::TypeCast::convertGeneric<float16, nd4j::int8>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
nd4j::TypeCast::convertGeneric<float16, nd4j::uint8>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
nd4j::TypeCast::convertGeneric<float16, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
nd4j::TypeCast::convertGeneric<float16, nd4j::int16>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
nd4j::TypeCast::convertGeneric<float16, nd4j::uint16>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
// TODO: same as above, FLOAT24 is not supported yet
} else if (dstType == ND4J_FLOAT32) {
nd4j::TypeCast::convertGeneric<float16, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
nd4j::TypeCast::convertGeneric<float16, double>(extras, dx, N, dz);
} else if (dstType == ND4J_THRESHOLD) {
//nd4j::convertToThreshold<float16>(nullptr, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_INT16) {
if (dstType == ND4J_FLOAT8) {
nd4j::TypeCast::convertGeneric<nd4j::int16, nd4j::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
nd4j::TypeCast::convertGeneric<nd4j::int16, nd4j::int8>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
nd4j::TypeCast::convertGeneric<nd4j::int16, nd4j::uint8>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
nd4j::TypeCast::convertGeneric<nd4j::int16, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
nd4j::TypeCast::convertGeneric<nd4j::int16, nd4j::int16>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
nd4j::TypeCast::convertGeneric<nd4j::int16, nd4j::uint16>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
// TODO...
} else if (dstType == ND4J_FLOAT32) {
nd4j::TypeCast::convertGeneric<nd4j::int16, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
nd4j::TypeCast::convertGeneric<nd4j::int16, double>(extras, dx, N, dz);
} else {
printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_FLOAT24) {
} else if (srcType == ND4J_FLOAT32) {
if (dstType == ND4J_FLOAT8) {
nd4j::TypeCast::convertGeneric<float, nd4j::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
nd4j::TypeCast::convertGeneric<float, nd4j::int8>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
nd4j::TypeCast::convertGeneric<float, nd4j::uint8>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
nd4j::TypeCast::convertGeneric<float, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
nd4j::TypeCast::convertGeneric<float, nd4j::int16>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
nd4j::TypeCast::convertGeneric<float, nd4j::uint16>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
} else if (dstType == ND4J_DOUBLE) {
nd4j::TypeCast::convertGeneric<float, double>(extras, dx, N, dz);
} else if (dstType == ND4J_THRESHOLD) {
//nd4j::convertToThreshold<float>(nullptr, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_DOUBLE) {
if (dstType == ND4J_FLOAT8) {
nd4j::TypeCast::convertGeneric<double, nd4j::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
nd4j::TypeCast::convertGeneric<double, nd4j::int8>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
nd4j::TypeCast::convertGeneric<double, nd4j::uint8>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
nd4j::TypeCast::convertGeneric<double, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
nd4j::TypeCast::convertGeneric<double, nd4j::int16>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
nd4j::TypeCast::convertGeneric<double, nd4j::uint16>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
} else if (dstType == ND4J_FLOAT32) {
nd4j::TypeCast::convertGeneric<double, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
//
} else if (dstType == ND4J_THRESHOLD) {
//nd4j::convertToThreshold<double>(nullptr, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_THRESHOLD) {
if (dstType == ND4J_FLOAT16) {
//nd4j::convertFromThreshold<float16>(nullptr, dx, N, dz);
} else if (dstType == ND4J_FLOAT32) {
//nd4j::convertFromThreshold<float>(nullptr, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
//nd4j::convertFromThreshold<double>(nullptr, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
}
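// Hedged usage sketch (the dispatching function's signature isn't visible in
// this fragment, so the name "convertTypes" below is an assumption): a
// host-side caller passes source and destination type ids and the IF/ELSE
// ladder above selects the matching nd4j::TypeCast::convertGeneric<Src, Dst>
// instantiation, e.g.:
//
// // convert N float values at dx into double values at dz (hypothetical call)
// convertTypes(extras, ND4J_FLOAT32, dx, N, ND4J_DOUBLE, dz);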
|
7696aa25237648ec5e3ce33b18cad50ca72a94d3.cu
|
#include "../NativeOps.h"
#include <cuda.h>
#include <cuda_launch_config.h>
#include <buffer.h>
#include <helpers/shape.h>
#include "../Environment.h"
#include <helpers/TAD.h>
#include <ops/specials.h>
#include <loops/reduce3.h>
#include <loops/reduce.h>
#include <loops/indexreduce.h>
#include <loops/pairwise_transform.h>
#include <loops/transform.h>
#include <loops/scalar.h>
#include <loops/broadcasting.h>
#include <loops/summarystatsreduce.h>
#include <loops/random.h>
//#include <thread>
#include <map>
#include <cuda_runtime_api.h>
#include <cuda_runtime.h>
#include <cuda_device_runtime_api.h>
#include <pointercast.h>
#include <stdio.h>
#include <stdlib.h>
#include <loops/type_conversions.h>
#include <op_boilerplate.h>
#include <loops/grid_shaped.h>
#include <loops/grid_strided.h>
#include <loops/aggregates.h>
#include <helpers/threshold.h>
#include <ShapeList.h>
#include <Context.h>
#include <ops/specials_cuda.h>
// FIXME: we need cuda-specific implementations
#include <helpers/logger.h>
#include <NDArray.h>
#include <NDArrayFactory.h>
#include <GraphExecutioner.h>
#include <graph/GraphHolder.h>
#include <graph/VariablesSet.h>
#include <ops/declarable/OpRegistrator.h>
#include <ops/declarable/CustomOperations.h>
//#include <sys/time.h>
// b40c only available for gcc :(
#ifdef __clang__
// do nothing
#elif __GNUC__
#include <b40c/util/error_utils.cuh>
#include <b40c/util/multiple_buffering.cuh>
#include <b40c/radix_sort/enactor.cuh>
#endif
#include <curand.h>
#include <Status.h>
#include <helpers/DebugHelper.h>
using namespace nd4j;
#include <loops/special_kernels.h>
cudaDeviceProp *deviceProperties;
cudaFuncAttributes *funcAttributes = new cudaFuncAttributes[64];
int blockLimit = 128;
int maxThreads = 512;
bool allowedP2P = false;
bool supportedP2P = false;
#ifdef __EXPERIMENTAL__
bool experimentalSupport = true;
#else
bool experimentalSupport = false;
#endif
int minThreads = 32;
__constant__ char deviceConstantMemory[49152];
typedef struct {
long streamId;
long callId;
} __syncInfo;
typedef __syncInfo SyncInfo;
// this method isn't used, left here for legacy and caution purposes
// TL;DR: don't use it this way, it sucks
void CUDART_CB syncCallback(cudaStream_t stream, cudaError_t status, void *data){
SyncInfo *sync = reinterpret_cast<SyncInfo *>(data);
printf("Finished stream: [%i], kernel call: [%i]\n", sync->streamId, sync->callId);
}
// this method just does type conversion in fancy way
int getDeviceId(Nd4jPointer ptrToDeviceId) {
return (int)(Nd4jLong)ptrToDeviceId;
}
template <typename T>
dim3 getOptimalDimensions(Nd4jLong n,cudaFuncAttributes attributes, cudaDeviceProp properties) {
// we can combine the two to compute a block size
int num_threads = block_size_with_maximum_potential_occupancy(attributes, properties);
// no real sense launching more threads than the number of elements we have
if (num_threads > n) num_threads = n;
if (maxThreads > 0 && num_threads > maxThreads) num_threads = maxThreads;
// compute the number of blocks of size num_threads to launch
int num_blocks = n / num_threads;
// check for partial block at the end
if (num_blocks > blockLimit) num_blocks = blockLimit;
if (num_blocks < 4 && n > 128) {
num_blocks = 4;
num_threads = n / num_blocks;
}
if (num_threads >= 768) {
num_blocks = num_blocks * 2;
num_threads = num_threads / 2;
}
if(n % num_threads && num_blocks < blockLimit) ++num_blocks;
//(num_threads * sizeof(T)) + attributes.sharedSizeBytes);
return dim3(num_blocks,num_threads, 3000);
}
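// Worked example of the heuristic above (numbers assumed, not measured): with
// n = 100000 and an occupancy-suggested block size of 512 threads,
// num_blocks = 100000 / 512 = 195, clamped to blockLimit = 128; 512 < 768, and
// since num_blocks is no longer strictly below blockLimit the partial-block
// bump is skipped, giving dim3(128, 512, 3000) - note the z member is a
// hardcoded 3000-byte shared-memory request, not a computed one.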
int getBaseMemorySize(int xRank, cudaFuncAttributes funcAttr) {
int memory_limit = 256; //funcAttr.sharedSizeBytes;
// TODO: remove this later
memory_limit += sizeof(UnifiedSharedMemory) + 32; // sizeof(shape::TAD) + (xRank * 4 * 4)
/*
if (xRank == 0) xRank = 2;
memory_limit += (xRank * 2 + 4) * 3 * 4; // we reserve memory for xShape + T1/T2 shapes
memory_limit += yRank == 0 ? 0 : (yRank * 2 + 4) * 4;
memory_limit += zRank == 0 ? 0 : (zRank * 2 + 4) * 4;
memory_limit += (xRank * 4) * 6;
memory_limit += MAX_RANK * 4; // special case, needed roughly in one pass
*/
return memory_limit;
}
/*
* Basic CUDA constants here: number of blocks per MP
*/
int getDeviceBlockThreshold(int deviceId) {
int ccMinor = deviceProperties[deviceId].minor;
int ccMajor = deviceProperties[deviceId].major;
int blockThreshold = 8;
if (ccMajor >= 5)
blockThreshold = 32;
else if (ccMajor == 3)
blockThreshold = 16;
else if (ccMajor < 3)
blockThreshold = 8;
return blockThreshold;
}
dim3 getBasicLaunchParams(int deviceId, long problemLength, int sharedMemoryPerThread, cudaFuncAttributes funcAttr) {
int countMP = deviceProperties[deviceId].multiProcessorCount;
int blockThreshold = getDeviceBlockThreshold(deviceId);
int num_threads = problemLength / (countMP * blockThreshold);
num_threads = nd4j::math::nd4j_min<int>(num_threads, maxThreads);
num_threads = nd4j::math::nd4j_max<int>(num_threads, 64);
num_threads = nd4j::math::nd4j_max<int>(num_threads, minThreads);
int num_blocks = nd4j::math::nd4j_max<int>(problemLength / num_threads, 1);
num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit);
int memory_limit = (sharedMemoryPerThread * num_threads) + getBaseMemorySize(1, funcAttr);
dim3 launchDims = dim3(num_blocks, num_threads, memory_limit);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Preliminary basic launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i]\n", num_blocks, num_threads, memory_limit);
return launchDims;
}
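// Worked example (device numbers assumed): countMP = 20, cc >= 5.0 so
// blockThreshold = 32, problemLength = 1000000, sharedMemoryPerThread = 16:
// num_threads = 1000000 / (20 * 32) = 1562 -> clamped to maxThreads = 512;
// num_blocks = 1000000 / 512 = 1953 -> clamped to blockLimit = 128;
// memory_limit = 16 * 512 + getBaseMemorySize(1, funcAttr) = 8192 + base.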
/*
* This method returns the shared memory threshold value; the default overflow ratio is 0.3
*/
int getDeviceSharedThreshold(int deviceId) {
int ccMinor = deviceProperties[deviceId].minor;
int ccMajor = deviceProperties[deviceId].major;
// please note: the threshold isn't a multiple of 32, and that's NOT a mistake
int shmemThreshold;
if (ccMajor == 6 && ccMinor == 0)
shmemThreshold = 65536;
else if (ccMajor == 6 && ccMinor == 1)
shmemThreshold = 49152;
else if (ccMajor == 5 && ccMinor == 2)
shmemThreshold = 98304;
else if (ccMajor == 5)
shmemThreshold = 65536;
else if (ccMajor == 3 && ccMinor == 7)
shmemThreshold = 114688;
else shmemThreshold = 49152;
return shmemThreshold / 0.3;
}
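// Note: the value returned above is the raw per-cc limit divided by the 0.3
// overflow ratio, i.e. an intentionally inflated budget. E.g. for cc 6.1:
// 49152 / 0.3 = 163840 bytes; for cc 5.2: 98304 / 0.3 = 327680 bytes.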
dim3 getBetterDimensions(int deviceId, int numTads, int tadLength, int xRank, cudaFuncAttributes funcAttr, int dimensionLength, int elementSize, int reduction) {
int num_threads = nd4j::math::nd4j_min<int>(tadLength, maxThreads);
int countMP = deviceProperties[deviceId].multiProcessorCount;
int regPerBlock = deviceProperties[deviceId].regsPerBlock;
int warpSize = deviceProperties[deviceId].warpSize;
int blockThreshold = getDeviceBlockThreshold(deviceId);
int shmemThreshold = getDeviceSharedThreshold(deviceId);
// round num_threads down to a multiple of warpSize
num_threads -= num_threads % warpSize;
num_threads = nd4j::math::nd4j_max<int>(1, num_threads);
if (num_threads < warpSize && tadLength < warpSize)
num_threads = tadLength;
// since we use shared memory as fast memory for some cases - we need to count that in
int memory_limit = getBaseMemorySize(xRank, funcAttr);
int memory_floor = memory_limit;
int effective_block_limit = countMP * blockThreshold;
int num_blocks = numTads; //nd4j::math::nd4j_min<int>(numTads, effective_block_limit);
int desiredShared = shmemThreshold / nd4j::math::nd4j_max<int>((num_blocks / countMP), 1);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Launch context: numBlocks: [%i], numThreads: [%i], countMap: [%i], shmemThreshold: [%i], desiredShared: [%i], elementSize: [%i]\n", num_blocks, num_threads, countMP, shmemThreshold, desiredShared, elementSize);
// at this moment we've stored all required information; time to factor in the reduction multipliers
int reduction_per_block = 0;
bool found = false;
if (reduction > 0)
while (!found) {
reduction_per_block = (num_threads * elementSize * reduction);
if (memory_limit + reduction_per_block < desiredShared) {
memory_limit += reduction_per_block;
found = true;
} else {
if (num_threads > minThreads) {
num_threads -= 32;
} else {
memory_limit += reduction_per_block;
found = true;
}
}
}
// at this moment we know total memory used per block, and we also know per-mp limit.
int max_active_blocks = shmemThreshold / nd4j::math::nd4j_max<int>(memory_limit, 1);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("MAB: [%i], memory_floor: [%i], memory_limit: [%i], reductionPerBlock: [%i]\n", max_active_blocks, memory_floor, memory_limit, reduction_per_block);
// we don't want to spawn more blocks than the gpu can actually handle without queueing
//num_blocks = nd4j::math::nd4j_min<int>(num_blocks, max_active_blocks);
num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit);
// if (num_blocks > countMP)
// num_blocks = num_blocks - (num_blocks % countMP);
num_blocks = nd4j::math::nd4j_max<int>(num_blocks, 1);
int targetBlocksPerMP = num_blocks / countMP;
// now we know the desired number of blocks wrt shared memory. So, now we should take into account the number of threads per SM
if (targetBlocksPerMP * num_threads > 2048) {
while (targetBlocksPerMP * num_threads > 2048) {
if (num_threads <= minThreads)
break;
num_threads -= 32;
}
reduction_per_block = (num_threads * elementSize * reduction);
memory_limit = memory_floor + reduction_per_block;
}
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Preliminary reduce launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i], reduction_per_block: [%i], blocksPerMP: [%i]\n", num_blocks, num_threads, memory_limit, reduction_per_block, targetBlocksPerMP);
return dim3(num_blocks,num_threads, memory_limit);
}
/*
* This method returns kernel launch param for linear memory access
*/
dim3 getFlatLaunchParams(int deviceId, Nd4jLong *xShapeInfo, Nd4jLong *yShapeInfo, cudaFuncAttributes funcAttr) {
auto xRank = shape::rank(xShapeInfo);
auto yRank = yShapeInfo == nullptr ? 0 : shape::rank(yShapeInfo);
auto zRank = 0;
int memory_limit = getBaseMemorySize(xRank, funcAttr);
int countMP = deviceProperties[deviceId].multiProcessorCount;
int regPerBlock = deviceProperties[deviceId].regsPerBlock;
int blockThreshold = getDeviceBlockThreshold(deviceId);
int shmemThreshold = getDeviceSharedThreshold(deviceId);
auto xLength = shape::length(xShapeInfo);
int effective_block_limit = countMP * blockThreshold;
// for flat calls we just want as many concurrent blocks as possible, and we're not tied to TAD here
int num_threads = xLength / effective_block_limit;
if (num_threads < minThreads)
num_threads = minThreads;
num_threads = num_threads - (num_threads % 32);
int memory_floor = memory_limit;
int num_blocks = xLength / num_threads;
num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit);
// num_blocks = nd4j::math::nd4j_min<int>(num_blocks, effective_block_limit);
num_blocks = nd4j::math::nd4j_max<int>(num_blocks, 1);
int targetBlocksPerMP = num_blocks / countMP;
// now we know the desired number of blocks wrt shared memory. So, now we should take into account the number of threads per SM
if (targetBlocksPerMP * num_threads > 2048 && num_threads >= 128) {
while (targetBlocksPerMP * num_threads > 2048) {
if (num_threads <= minThreads)
break;
num_threads -= 32;
}
}
if (xLength / num_threads > blockLimit)
num_blocks *= 2;
dim3 launchDims = dim3(num_blocks, num_threads, memory_limit);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Preliminary scalar launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i], blocksPerMP: [%i], problemLength: [%i], effectiveBlockLimit: [%i]\n", num_blocks, num_threads, memory_limit, targetBlocksPerMP, xLength, effective_block_limit);
return launchDims;
}
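// Worked example (device numbers assumed): xLength = 65536, countMP = 16,
// blockThreshold = 32 -> effective_block_limit = 512;
// num_threads = 65536 / 512 = 128 (already a multiple of 32);
// num_blocks = 65536 / 128 = 512 -> clamped to blockLimit = 128;
// targetBlocksPerMP = 128 / 16 = 8, and 8 * 128 = 1024 <= 2048, so no shrink;
// finally 65536 / 128 = 512 > blockLimit, so num_blocks doubles to 256 -
// i.e. the returned grid may intentionally exceed blockLimit.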
/**
* This method returns kernel launch params with TAD-based memory access
*
* @param deviceId
* @param xShapeInfo
* @param tadShapeInfo
* @param funcAttr
* @param dimensionLength
* @param elementSize
* @param reductionSize
* @return
*/
dim3 getReduceLaunchParams(int deviceId, Nd4jLong *xShapeInfo, Nd4jLong *tadShapeInfo, cudaFuncAttributes funcAttr, int dimensionLength, int elementSize, int reductionSize) {
Nd4jLong tadLength = 0;
Nd4jLong numTads = 0;
if (tadShapeInfo != nullptr) {
tadLength = shape::length(tadShapeInfo);
numTads = shape::length(xShapeInfo) / tadLength;
if (tadLength == 1) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("A xLength: [%i], zLength: [%i]\n", shape::length(xShapeInfo), shape::length(tadShapeInfo));
}
} else{
// we have a special case - reduction along all dimensions
tadLength = nd4j::math::nd4j_min<int>(shape::length(xShapeInfo), 768);
numTads = shape::length(xShapeInfo) / tadLength;
}
auto xRank = shape::rank(xShapeInfo);
int zRank = tadShapeInfo == nullptr ? 0 : shape::rank(tadShapeInfo);
dim3 launchDims = getBetterDimensions(deviceId, numTads, tadLength, xRank, funcAttr, dimensionLength, elementSize, reductionSize);
if (nd4j::Environment::getInstance()->isDebugAndVerbose()) { //|| launchDims.x == 1
printf("Reduce LaunchParams: xLength: [%i], numTads: [%i], tadLength: [%i], launchDims.x: [%i], launchDims.y: [%i], launchDims.z: [%i]\n", shape::length(xShapeInfo), numTads, tadLength, launchDims.x, launchDims.y, launchDims.z);
}
return launchDims;
}
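// Example of the "reduction along all dimensions" branch above: with
// tadShapeInfo == nullptr and shape::length(xShapeInfo) = 100000, the code
// fakes tadLength = min(100000, 768) = 768 and numTads = 100000 / 768 = 130,
// then defers the actual sizing to getBetterDimensions().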
/**
* Returns optimal launch parameters
* given the extra pointers passed in.
* The extra pointer should be
* the host pointer for the shape information
* associated with the data.
* From there it is used to obtain the length
* from which we can derive the optimal launch parameters.
*
*/
template <typename T>
dim3 getOptimalLaunchParameters(Nd4jPointer *extraPointers, cudaFuncAttributes attributes, cudaDeviceProp properties) {
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto n = shape::length(hostXShapeInfo);
dim3 launchDims = getOptimalDimensions<T>(n,attributes, properties);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Params: gridSize: [%i], blockSize: [%i], shMem: [%i], problemLength: [%i], totalThreads:[%i]\n", launchDims.x, launchDims.y, launchDims.z, n, (launchDims.x * launchDims.y));
return launchDims;
}
nd4j::buffer::Buffer<Nd4jLong> * createScalarBuffer(cudaStream_t stream) {
Nd4jLong *scalarShapeInfo = shape::createScalarShapeInfo();
nd4j::buffer::Buffer<Nd4jLong> *buff = nd4j::buffer::createBuffer(scalarShapeInfo,shape::shapeInfoLength(2), stream);
nd4j::buffer::copyDataToGpu(&buff, stream);
return buff;
}
class ScalarShapeInformation {
private:
nd4j::buffer::Buffer<Nd4jLong> *scalarDimension;
nd4j::buffer::Buffer<Nd4jLong> *scalarShapeInfo;
// std::thread::id threadId;
public:
ScalarShapeInformation(cudaStream_t stream) {
auto scalarDimensionBuff = reinterpret_cast<Nd4jLong *>(malloc(sizeof(Nd4jLong)));
CHECK_ALLOC(scalarDimensionBuff, "Failed to allocate ShapeInfoBuffer");
scalarDimensionBuff[0] = MAX_DIMENSION;
scalarDimension = nd4j::buffer::createBuffer(scalarDimensionBuff,1, stream);
scalarShapeInfo = createScalarBuffer(stream);
// threadId = std::this_thread::get_id();
}
~ScalarShapeInformation() {
nd4j::buffer::freeBuffer(&scalarShapeInfo);
nd4j::buffer::freeBuffer(&scalarDimension);
}
Nd4jLong *getShapeInfoHostPointer() {
return scalarShapeInfo->data;
}
Nd4jLong * getShapeInfoGpuPointer() {
return scalarShapeInfo->gData;
}
Nd4jLong * getDimensionHostPointer() {
return scalarDimension->data;
}
Nd4jLong * getDimensionGpuPointer() {
return scalarDimension->gData;
}
};
template <typename T>
class ScalarInfo {
nd4j::buffer::Buffer<T> *scalarData;
ScalarShapeInformation *shapeInfo;
T finalResult;
cudaStream_t streamRef;
public:
ScalarInfo(cudaStream_t stream) {
T *scalarResult = reinterpret_cast<T*>(malloc(sizeof(T)));
CHECK_ALLOC(scalarResult, "Failed to allocate new scalar buffer");
shapeInfo = new ScalarShapeInformation(stream);
scalarData = nd4j::buffer::createBuffer(scalarResult,1, stream);
streamRef = stream;
nd4j::buffer::copyDataToGpu(&scalarData, stream);
}
T getFinalResultFromDevice() {
nd4j::buffer::copyDataFromGpu(&scalarData, streamRef);
return scalarData->data[0];
}
/**
* Get the device shape information
* representing a scalar
*/
Nd4jLong *getDeviceShapeInfo() {
return shapeInfo->getShapeInfoGpuPointer();
}
/**
* Get the result pointers
*/
T *getDevicePointer() {
return scalarData->gData;
}
/**
* Get the infinite dimension device pointer
*/
Nd4jLong *getDimensionDevicePointer() {
return shapeInfo->getDimensionGpuPointer();
}
~ScalarInfo() {
nd4j::buffer::freeBuffer(&scalarData);
delete shapeInfo;
}
};
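// Hedged usage sketch (not part of the original file; the kernel name below is
// a placeholder): ScalarInfo<T> wraps a one-element device buffer plus scalar
// shape info, so a scalar-producing kernel can write to getDevicePointer() and
// the host can read the value back synchronously.
#if 0
cudaStream_t stream;
cudaStreamCreate(&stream);
{
    ScalarInfo<float> scalar(stream);
    // someScalarKernel stands in for any kernel writing a single float result
    someScalarKernel<<<1, 1, 0, stream>>>(scalar.getDevicePointer(),
                                          scalar.getDeviceShapeInfo());
    float result = scalar.getFinalResultFromDevice(); // copies gData -> data[0]
}
cudaStreamDestroy(stream);
#endif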
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
*/
double NativeOps::execIndexReduceScalarDouble(Nd4jPointer *extraPointers,int opNum,
double *x,
Nd4jLong *xShapeInfo,
double *extraParams) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
Nd4jLong *hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
Nd4jLong *hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
Nd4jLong *deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
Nd4jLong *deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D1 opNum:[%i]\n", opNum);
double *resultPointer = reinterpret_cast<double *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[27], 1, sizeof(double), 3);
functions::indexreduce::IndexReduce<double>::executeIndexReduceScalar(launchDims, stream, opNum,
x,
xShapeInfo, shape::rank(hostXShapeInfo),
extraParams,
resultPointer,
nullptr, 0,
nullptr,
1,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
double result = resultPointer[0];
return result;
}
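// extraPointers slot layout, as inferred from its uses throughout this file
// (documentation aid, not an authoritative spec):
// [0] host X shape info          [1] cuda stream
// [2] device id                  [3] allocation pointer (int *)
// [4] reduction buffer           [5] host-visible scalar result
// [6] special device buffer      [7] host Y shape info
// [8] host Z shape info          [9] host TAD shape info
// [10] device TAD shape info     [11] device TAD offsets
// [12] device TAD shape info (Z or Y) [13] device TAD offsets (Z or Y)
// Slots 12/13 double as Z-TAD data in broadcast calls and Y-TAD data in
// reduce3 calls.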
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execIndexReduceDouble(
Nd4jPointer *extraPointers,
int opNum,
double *x,
Nd4jLong *xShapeInfo,
double *extraParams,
double *result,
Nd4jLong *resultShapeInfo,
int *dimension, int dimensionLength) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
Nd4jLong *hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
Nd4jLong *hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
Nd4jLong *hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
Nd4jLong *deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
Nd4jLong *deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D2 opNum:[%i]\n", opNum);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[27], dimensionLength, sizeof(double), 3);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
functions::indexreduce::IndexReduce<double>::executeIndexReduce(launchDims, stream,
opNum,
x,
xShapeInfo, shape::rank(hostXShapeInfo),
extraParams,
result,
resultShapeInfo, shape::rank(hostZShapeInfo),
dimension,
dimensionLength,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execBroadcastDouble(Nd4jPointer *extraPointers,
int opNum,
double *x,
Nd4jLong *xShapeInfo,
double *y,
Nd4jLong *yShapeInfo,
double *result,
Nd4jLong *resultShapeInfo,
int *dimension, int dimensionLength){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostYShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[7]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
auto deviceTADShapeInfoZ = reinterpret_cast<Nd4jLong *>(extraPointers[12]);
auto deviceTADOffsetsZ = reinterpret_cast<Nd4jLong *>(extraPointers[13]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D3 opNum:[%i]\n", opNum);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[26], dimensionLength, sizeof(double), 2);
functions::broadcast::Broadcast<double>::executeBroadcast(launchDims, stream, opNum, x, xShapeInfo, y, yShapeInfo, result, resultShapeInfo, dimension, dimensionLength, deviceTADShapeInfo, deviceTADOffsets, deviceTADShapeInfoZ, deviceTADOffsetsZ);
}
/**
*
* @param opNum
* @param dx
* @param xStride
* @param y
* @param yStride
* @param result
* @param resultStride
* @param extraParams
* @param n
*/
void NativeOps::execPairwiseTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
double *dx,
Nd4jLong xStride,
double *y,
Nd4jLong yStride,
double *result,
Nd4jLong resultStride,
double *extraParams, Nd4jLong n) {
dim3 launchDims(512, 512, 2048);
functions::pairwise_transforms::PairWiseTransform<double>::execudaCudaStrided(launchDims, extraPointers, opNum, dx, xStride, y, yStride, result, resultStride, extraParams, n);
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
* @param xIndexes
* @param yIndexes
* @param resultIndexes
*/
void NativeOps::execPairwiseTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
double *dx,
Nd4jLong *xShapeInfo,
double *y,
Nd4jLong *yShapeInfo,
double *result,
Nd4jLong *resultShapeInfo,
double *extraParams,
Nd4jLong *xIndexes,
Nd4jLong *yIndexes,
Nd4jLong *resultIndexes) {
///
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execPairwiseTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
double *dx,
Nd4jLong *xShapeInfo,
double *y,
Nd4jLong *yShapeInfo,
double *result,
Nd4jLong *resultShapeInfo,
double *extraParams) {
dim3 launchDims(512, 512, 2048);
functions::pairwise_transforms::PairWiseTransform<double>::execudaCudaShaped(launchDims, extraPointers, opNum, dx, xShapeInfo, y, yShapeInfo, result, resultShapeInfo, extraParams);
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduceDouble(
Nd4jPointer *extraPointers,
int opNum,
double *x,
Nd4jLong *xShapeInfo,
double *extraParams,
double *result,
Nd4jLong *resultShapeInfo) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D7 opNum:[%i]\n", opNum);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
if (opNum == 19) {
execReduceDouble(extraPointers, 3, x, xShapeInfo, extraParams, result, resultShapeInfo);
}
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[22], 1, sizeof(double), 1);
// this macro builds a bunch of IF/ELSE selectors for the kernel launch
functions::reduce::ReduceFunction<double>::execReduceScalar(launchDims, stream, opNum, x, xShapeInfo, extraParams, result, resultShapeInfo, nullptr,1 , reductionPointer, deviceTADShapeInfo);
nd4j::DebugHelper::checkErrorCode(stream, "execReduceDouble(...) failed");
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduceDouble(
Nd4jPointer *extraPointers,
int opNum,
double *x,
Nd4jLong *xShapeInfo,
double *extraParams,
double *result,
Nd4jLong *resultShapeInfo,
int *dimension,
int dimensionLength) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D8 opNum:[%i]\n", opNum);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
if (opNum == 19) {
execReduceDouble(extraPointers, 3, x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength);
}
/**
* We have separate kernels, optimized for different number of dimensions for reductions
*/
if (dimensionLength == 1) {
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[32], dimensionLength, sizeof(double), 2);
functions::reduce::ReduceFunction<double>::execReduceXD(launchDims, stream, opNum, 1, x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
} else if (shape::rank(hostTADShapeInfo) <= 3) {
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[33], dimensionLength, sizeof(double), 2);
functions::reduce::ReduceFunction<double>::execReduceXD(launchDims, stream, opNum, shape::rank(hostTADShapeInfo), x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
} else {
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[22], dimensionLength, sizeof(double), 2);
functions::reduce::ReduceFunction<double>::execReduceXD(launchDims, stream, opNum, shape::rank(hostTADShapeInfo), x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
}
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @return
*/
double NativeOps::execReduceScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
double *x,
Nd4jLong *xShapeInfo,
double *extraParams){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D9 opNum:[%i]\n", opNum);
double *resultPointer = reinterpret_cast<double *>(extraPointers[5]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
//dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[22], 1, sizeof(double), 1);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[22]);
// for LogExpSum op we need to know max value, and store it
if (opNum == 19) {
double tmp = execReduceScalarDouble(extraPointers, 3, x, xShapeInfo, extraParams);
extraParams = resultPointer;
}
// this macro builds a bunch of IF/ELSE selectors for the kernel launch
//DISPATCH_SIMPLE(reduceScalarSimple, double, PARAMS(x, xShapeInfo, extraParams, resultPointer, nullptr, nullptr,1 , reductionPointer, deviceTADShapeInfo), OPS_A(REDUCE_OPS))
functions::reduce::ReduceFunction<double>::execReduceScalar(launchDims, stream, opNum, x, xShapeInfo, extraParams, resultPointer, nullptr, nullptr,1 , reductionPointer, deviceTADShapeInfo);
nd4j::DebugHelper::checkErrorCode(stream, "execReduceScalarDouble(...) failed");
double result = resultPointer[0];
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduce3Double(
Nd4jPointer *extraPointers,
int opNum,
double *x,
Nd4jLong *xShapeInfo,
double *extraParams,
double *y,
Nd4jLong *yShapeInfo,
double *result,
Nd4jLong *resultShapeInfo) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
auto yDeviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[12]);
auto yDeviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[13]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D10 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[21]);
reduce3Double<<<1,launchDims.y,launchDims.z, *stream>>>(
opNum,
x,
xShapeInfo,
y,
yShapeInfo,
extraParams,
result,
resultShapeInfo,
nullptr,
1,
1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets);
DEBUG_KERNEL(stream, opNum);
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
*/
double NativeOps::execReduce3ScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
double *x,
Nd4jLong *xShapeInfo,
double *extraParams,
double *y,
Nd4jLong *yShapeInfo){
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D11 opNum:[%i]\n", opNum);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
auto resultPointer = reinterpret_cast<double *>(extraPointers[5]);
auto allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
auto yDeviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[12]);
auto yDeviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[13]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[21]);
reduce3ScalarDouble<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
x,
xShapeInfo,
y,
yShapeInfo,
extraParams,
resultPointer,
nullptr,
nullptr,
1,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets);
// since this method should return scalar value - we should block on this call
nd4j::DebugHelper::checkErrorCode(stream, "execReduce3ScalarDouble(...) failed");
double result = resultPointer[0];
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execReduce3Double(
Nd4jPointer *extraPointers,
int opNum,
double *x,
Nd4jLong *xShapeInfo,
double *extraParams,
double *y,
Nd4jLong *yShapeInfo,
double *result,
Nd4jLong *resultShapeInfo,
int *dimension,
int dimensionLength){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D12 opNum:[%i]\n", opNum);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
auto yDeviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[12]);
Nd4jLong *yDeviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[13]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[21]);
reduce3Double<<<1,launchDims.y,launchDims.z, *stream>>>(
opNum,
x,
xShapeInfo,
y,
yShapeInfo,
extraParams,
result,
resultShapeInfo,
dimension,
dimensionLength,
1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets);
DEBUG_KERNEL(stream, opNum);
}
/**
*
* @param opNum
* @param x
* @param xStride
* @param result
* @param resultStride
* @param scalar
* @param extraParams
* @param n
*/
void NativeOps::execScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
double *x,
Nd4jLong xStride,
double *result,
Nd4jLong resultStride,
double scalar,
double *extraParams,
Nd4jLong n) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[20]);
functions::scalar::ScalarTransform<double>::executeCudaStrided(launchDims, extraPointers, opNum, x, xStride, result, resultStride, scalar, extraParams, n);
DEBUG_KERNEL(stream, opNum);
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param scalar
* @param extraParams
* @param n
*/
void NativeOps::execScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
double *x,
Nd4jLong *xShapeInfo,
double *result,
Nd4jLong *resultShapeInfo,
double scalar,
double *extraParams){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[19]);
// this macro builds a bunch of IF/ELSE selectors for the kernel launch
//DISPATCH_SIMPLE(scalarSimpleShaped, double, PARAMS(scalar, x, xShapeInfo, extraParams, result, resultShapeInfo, allocPointer), OPS_A(SCALAR_OPS))
functions::scalar::ScalarTransform<double>::executeCudaShaped(launchDims, extraPointers, opNum, x, xShapeInfo, result, resultShapeInfo, scalar, extraParams);
DEBUG_KERNEL(stream, opNum);
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param scalar
* @param extraParams
* @param n
* @param xIndexes
* @param resultIndexes
*/
void NativeOps::execScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
double *x,
Nd4jLong *xShapeInfo,
double *result,
Nd4jLong *resultShapeInfo,
double scalar,
double *extraParams,
Nd4jLong n,
Nd4jLong *xIndexes,
Nd4jLong *resultIndexes){
printf("Unsupported operation: scalarIndices\n");
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
*/
double NativeOps::execSummaryStatsScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
double *x,
Nd4jLong *xShapeInfo,
double *extraParams,bool biasCorrected){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
double *resultPointer = reinterpret_cast<double *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], 1, sizeof(double), 8);
launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x);
return functions::summarystats::SummaryStatsReduce<double>::execSummaryStatsReduceScalar(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, biasCorrected);
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execSummaryStatsDouble(
Nd4jPointer *extraPointers,
int opNum,
double *x,
Nd4jLong *xShapeInfo,
double *extraParams,
double *result,
Nd4jLong *resultShapeInfo,bool biasCorrected) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D17 opNum:[%i]\n", opNum);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], 1, sizeof(double), 8);
// we have to limit grid size here, due to limited nature of reduction/allocation pointers
launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x);
functions::summarystats::SummaryStatsReduce<double>::execSummaryStatsReduce(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, result, resultShapeInfo, biasCorrected);
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execSummaryStatsDouble(
Nd4jPointer *extraPointers,
int opNum,
double *x,
Nd4jLong *xShapeInfo,
double *extraParams,
double *result,
Nd4jLong *resultShapeInfo,
int *dimension, int dimensionLength,bool biasCorrected){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], dimensionLength, sizeof(double), 8);
// we're limiting maximum grid size for summaryStats ops
launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x);
functions::summarystats::SummaryStatsReduce<double>::execSummaryStatsReduce(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, biasCorrected);
}
/**
*
* @param opNum
* @param dx
* @param xStride
* @param result
* @param resultStride
* @param extraParams
* @param n
*/
void NativeOps::execTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
double *dx,
Nd4jLong xStride,
double *z,
Nd4jLong zStride,
double *extraParams,
Nd4jLong n) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D19 opNum:[%i]\n", opNum);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[16]);
functions::transform::Transform<double>::executeTransformStrided(launchDims, stream, opNum, n, dx, xStride, extraParams, z, zStride, allocPointer, reductionPointer);
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
double *dx,
Nd4jLong *xShapeInfo,
double *result,
Nd4jLong *resultShapeInfo,
double *extraParams){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D20 opNum:[%i]\n", opNum);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostYShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[7]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
int *maskedAllocPointer = allocPointer;
// special pointer for special buffer for special ops
double *specialPointer = reinterpret_cast<double *>(extraPointers[6]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[1]);
auto dimension = reinterpret_cast<int *>(specialPointer);
int *maxDimension = dimension + 1;
auto maxShapeBuffer = reinterpret_cast<Nd4jLong *>(maxDimension + 1);
double * special = reinterpret_cast<double *>(maxShapeBuffer + (MAX_RANK * 2 + 4));
auto devTadShapeInfo = reinterpret_cast<Nd4jLong *> (extraPointers[10]);
auto devTadOffsets = reinterpret_cast<Nd4jLong *> (extraPointers[11]);
/**
* ops between 38 and 41 are special ops:
* SoftMax, LogSoftMax, SoftMaxDerivative, IsMax
* On cuda we execute them as compositions of simpler kernel calls
*/
// simple trick to work around reductions into scalar
if (opNum >= 38 && opNum <= 41) {
if (shape::isVector(hostXShapeInfo) && opNum != 41) {
// if that's vector, we just go directly to op in 1 block
/*
* For vector cases of everything, but IsMax (41) we go for single-kernel calls
*/
int length = shape::length(hostXShapeInfo);
int block = nd4j::math::nd4j_min<int>(256, length);
launchDims.x = 1;
launchDims.y = block;
launchDims.z += (block * sizeof(double) * 4);
functions::transform::Transform<double>::executeTransformShaped(launchDims, stream, opNum, dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), allocPointer, reductionPointer, devTadShapeInfo, devTadOffsets);
} else {
// going for blockwise specials
// we'll do some pointer mangling here, and execute kernels one by one
auto shape = shape::shapeOf(hostXShapeInfo);
switch (opNum) {
case 40: // LogSoftMax
case 39: // SoftMax Derivative
case 38: {// softmax
Nd4jPointer tempPointers[16];
tempPointers[0] = extraPointers[0];
tempPointers[1] = extraPointers[1];
tempPointers[2] = extraPointers[2];
tempPointers[3] = extraPointers[3];
tempPointers[4] = extraPointers[4];
tempPointers[5] = extraPointers[5];
tempPointers[6] = extraPointers[6];
tempPointers[7] = extraPointers[7];
tempPointers[8] = extraPointers[8];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
tempPointers[12] = extraPointers[12];
tempPointers[13] = extraPointers[13];
tempPointers[14] = extraPointers[14];
tempPointers[15] = extraPointers[15];
Nd4jLong maxShape[2] = {shape::shapeOf(hostXShapeInfo)[0], 1};
auto hostMaxShapeBuffer = shape::shapeBuffer(2, maxShape);
tempPointers[7] = (Nd4jPointer) hostMaxShapeBuffer;
tempPointers[8] = (Nd4jPointer) hostMaxShapeBuffer;
// TODO: we could get rid of this one eventually
prepareShapeBuffer <<<1, 1, 128, *stream>>> (dimension, maxDimension, maxShapeBuffer, shape[0]);
DEBUG_KERNEL(stream, opNum);
tempPointers[9] = extraPointers[12];
tempPointers[10] = extraPointers[13];
tempPointers[11] = extraPointers[14];
// max 3
execReduceDouble(tempPointers, 3, dx, xShapeInfo, extraParams, special, maxShapeBuffer, maxDimension, 1);
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
tempPointers[12] = extraPointers[10];
tempPointers[13] = extraPointers[11];
// sub 1
execBroadcastDouble(tempPointers, 1, dx, xShapeInfo, special,
maxShapeBuffer, result, resultShapeInfo, dimension, 1);
// exp 3
execTransformDouble(extraPointers, 3, result, resultShapeInfo, result, resultShapeInfo, extraParams);
tempPointers[8] = tempPointers[7];
tempPointers[9] = extraPointers[12];
tempPointers[10] = extraPointers[13];
tempPointers[11] = extraPointers[14];
//sum 1
execReduceDouble(tempPointers, 1, result, resultShapeInfo, extraParams, special,
maxShapeBuffer, maxDimension, 1);
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
tempPointers[12] = extraPointers[10];
tempPointers[13] = extraPointers[11];
// divide 3
execBroadcastDouble(tempPointers, 3, result, resultShapeInfo, special,
maxShapeBuffer, result, resultShapeInfo, dimension, 1);
// log (transform op 5) for LogSoftMax, or derivative (transform op 42)
if (opNum == 40)
execTransformDouble(extraPointers, 5, result, resultShapeInfo, result, resultShapeInfo, extraParams);
else if (opNum == 39)
execTransformDouble(extraPointers, 42, result, resultShapeInfo, result, resultShapeInfo, extraParams);
nd4j::DebugHelper::checkErrorCode(stream, "SoftMax failed");
delete hostMaxShapeBuffer;
break;
}
case 41: {
// IsMax along all dimensions
bool scalarCheat = false;
if (extraParams == nullptr) {
scalarCheat = true;
}
if (scalarCheat) {
/**
* In case of vector-input for IsMax, it just turns into IndexReduce call + further filler call
*/
int maxIdx = (int) execIndexReduceScalarDouble(extraPointers, 0, dx, xShapeInfo, extraParams);
int targetIdx = 0;
if (shape::order(hostXShapeInfo) == 'c' || (shape::order(hostXShapeInfo) == 'f' && maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1] >= shape::length(hostXShapeInfo)))
targetIdx = maxIdx;
else
targetIdx = maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1];
fillIsMaxDouble<<< 1, 128, 0, *stream >>>(result, shape::length(hostXShapeInfo), targetIdx);
} else {
auto tadMaxShapeInfo = reinterpret_cast<Nd4jLong *> (extraPointers[10]);
auto tadMaxOffsets = reinterpret_cast<Nd4jLong *> (extraPointers[11]);
int *dimension = reinterpret_cast<int *> (extraPointers[15]);
special = reinterpret_cast<double *>(extraPointers[17]);
int dimensionLength = getDeviceId(extraPointers[18]);
// we call for IMax on specified dimension
execIndexReduceDouble(extraPointers, 0, dx, xShapeInfo, extraParams, special, hostYShapeInfo, dimension, dimensionLength);
DEBUG_KERNEL(stream, opNum);
// at this point, all IMax indexes are gathered, and we execute filler
fillDimensionalIsMaxDouble<<<blockLimit, 64, funcAttributes[37].sharedSizeBytes, *stream>>>(special, hostYShapeInfo, result, resultShapeInfo, tadMaxShapeInfo, dimension, dimensionLength, tadMaxOffsets );
nd4j::DebugHelper::checkErrorCode(stream, "Legacy IsMax(...) failed");
}
break;
}
default: {
printf("Bad case for transformDouble\n");
break;
}
}
}
} else {
// for Im2Col & Col2Im we enforce higher dimensionality
// TODO: investigate this on high-end gpus
if (opNum == 37 || opNum == 36 || opNum == 71) {
launchDims.x = 512;
launchDims.y = 512;
launchDims.z += 512 * sizeof(double);
} else if (opNum == 70) {
// we'll be using shared memory to speed up reverse
launchDims.z += launchDims.y * sizeof(double);
}
// Histogram op requires additional memory chunk
// FIXME: make this one use the cache
if (opNum == 48) {
int length = shape::length(hostZShapeInfo);
cudaMalloc(reinterpret_cast<void **>(&maskedAllocPointer), length * launchDims.x * sizeof(double));
}
if (opNum == 71) {
launchDims.z += 512 * sizeof(double);
}
functions::transform::Transform<double>::executeTransformShaped(launchDims, stream, opNum, dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), maskedAllocPointer, reductionPointer, devTadShapeInfo, devTadOffsets);
// we need guaranteed sync here, due to temp memory release
if (opNum == 48)
nd4j::DebugHelper::checkErrorCode(stream, "execTransformShaped(...) failed");
// release Histogram memory
if (opNum == 48) {
cudaFree(reinterpret_cast<void *>(maskedAllocPointer));
}
}
DEBUG_KERNEL(stream, opNum);
}
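// For reference, the blockwise SoftMax family above (opNum 38-40) decomposes
// into the following kernel sequence, matching the inline step comments:
// 1) reduce max along the max-dimension -> special buffer
// 2) broadcast-subtract the per-row max
// 3) transform exp in place
// 4) reduce sum along the max-dimension -> special buffer
// 5) broadcast-divide by the per-row sum
// 6) opNum 40 additionally applies log; opNum 39 applies the derivative fixup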
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
double *dx,
Nd4jLong *xShapeInfo,
double *result,
Nd4jLong *resultShapeInfo,
double *extraParams,
Nd4jLong *xIndexes,
Nd4jLong *resultIndexes) {
//
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
*/
float NativeOps::execIndexReduceScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
float *x,
Nd4jLong *xShapeInfo,
float *extraParams){
if (nd4j::Environment::getInstance()->isDebug())
printf("F1 opNum:[%i]\n", opNum);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostYShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[7]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
float *resultPointer = reinterpret_cast<float *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[13], 1, sizeof(float), 4);
if (nd4j::Environment::getInstance()->isDebugAndVerbose() && launchDims.x == 1)
printf("AF1 opNum:[%i]\n", opNum);
functions::indexreduce::IndexReduce<float>::executeIndexReduceScalar(launchDims, stream, opNum,
x,
xShapeInfo, shape::rank(hostXShapeInfo),
extraParams,
resultPointer,
nullptr, 0,
nullptr,
1,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
nd4j::DebugHelper::checkErrorCode(stream, "execIndexReduceScalarFloat(...) failed");
float result = resultPointer[0];
return result;
}
float NativeOps::execIndexReduceScalarHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
Nd4jLong *xShapeInfo,
float16 *extraParams){
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H1 opNum:[%i]\n", opNum);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
float16 *resultPointer = reinterpret_cast<float16 *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[13], 1, sizeof(float16), 8);
if (nd4j::Environment::getInstance()->isDebugAndVerbose() && launchDims.x == 1)
printf("AH1 opNum:[%i]\n", opNum);
functions::indexreduce::IndexReduce<float16>::executeIndexReduceScalar(launchDims, stream, opNum,
x,
xShapeInfo, shape::rank(hostXShapeInfo),
extraParams,
resultPointer,
nullptr, 0,
nullptr,
1,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
float result = (float) resultPointer[0];
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execIndexReduceFloat(
Nd4jPointer *extraPointers,
int opNum,
float *x,
Nd4jLong *xShapeInfo,
float *extraParams,
float *result,
Nd4jLong *resultShapeInfo,
int *dimension,
int dimensionLength){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F2 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[13], dimensionLength, sizeof(float), 4);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF2 opNum:[%i]\n", opNum);
functions::indexreduce::IndexReduce<float>::executeIndexReduce(launchDims, stream, opNum,
x,
xShapeInfo, shape::rank(hostXShapeInfo),
extraParams,
result,
resultShapeInfo, shape::rank(hostZShapeInfo),
dimension,
dimensionLength,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
}
void NativeOps::execIndexReduceHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
Nd4jLong *xShapeInfo,
float16 *extraParams,
float16 *result,
Nd4jLong *resultShapeInfo,
int *dimension,
int dimensionLength){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H2 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[13], dimensionLength, sizeof(float16), 8);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AH2 opNum:[%i]\n", opNum);
functions::indexreduce::IndexReduce<float16>::executeIndexReduce(launchDims, stream, opNum,
x,
xShapeInfo, shape::rank(hostXShapeInfo),
extraParams,
result,
resultShapeInfo, shape::rank(hostZShapeInfo),
dimension,
dimensionLength,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execBroadcastFloat(
Nd4jPointer *extraPointers,
int opNum,
float *x,
Nd4jLong *xShapeInfo,
float *y,
Nd4jLong *yShapeInfo,
float *result,
Nd4jLong *resultShapeInfo,
int *dimension, int dimensionLength){
/*
cudaEvent_t start;
cudaEventCreateWithFlags(&start, cudaEventDisableTiming);
timespec tsX;
timespec tsY;
clock_gettime(CLOCK_REALTIME, &tsX);
*/
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostYShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[7]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
auto deviceTADShapeInfoZ = reinterpret_cast<Nd4jLong *>(extraPointers[12]);
auto deviceTADOffsetsZ = reinterpret_cast<Nd4jLong *>(extraPointers[13]);
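    // broadcast applies y along every TAD (tensor-along-dimension) of x; the
    // TAD shape info/offsets above are precomputed on the host, with the
    // separate Z entries allowing the result array to have its own layout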
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F3 opNum:[%i]\n", opNum);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[12], 1, sizeof(float), 0);
functions::broadcast::Broadcast<float>::executeBroadcast(launchDims, stream, opNum, x, xShapeInfo, y, yShapeInfo, result, resultShapeInfo, dimension, dimensionLength, deviceTADShapeInfo, deviceTADOffsets, deviceTADShapeInfoZ, deviceTADOffsetsZ);
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::execBroadcastHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
Nd4jLong *xShapeInfo,
float16 *y,
Nd4jLong *yShapeInfo,
float16 *result,
Nd4jLong *resultShapeInfo,
int *dimension, int dimensionLength){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostYShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[7]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
auto deviceTADShapeInfoZ = reinterpret_cast<Nd4jLong *>(extraPointers[12]);
auto deviceTADOffsetsZ = reinterpret_cast<Nd4jLong *>(extraPointers[13]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H3 opNum:[%i]\n", opNum);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[12], 1, sizeof(float16), 0);
functions::broadcast::Broadcast<float16>::executeBroadcast(launchDims, stream, opNum, x, xShapeInfo, y, yShapeInfo, result, resultShapeInfo, dimension, dimensionLength, deviceTADShapeInfo, deviceTADOffsets, deviceTADShapeInfoZ, deviceTADOffsetsZ);
}
/**
*
* @param opNum
* @param dx
* @param xStride
* @param y
* @param yStride
* @param result
* @param resultStride
* @param extraParams
* @param n
*/
void NativeOps::execPairwiseTransformFloat(
Nd4jPointer *extraPointers,
int opNum,
float *dx,
Nd4jLong xStride,
float *y,
Nd4jLong yStride,
float *result,
Nd4jLong resultStride,
float *extraParams, Nd4jLong n){
dim3 launchDims(512, 512, 2048);
functions::pairwise_transforms::PairWiseTransform<float>::execudaCudaStrided(launchDims, extraPointers, opNum, dx, xStride, y, yStride, result, resultStride, extraParams, n);
}
void NativeOps::execPairwiseTransformHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *dx,
Nd4jLong xStride,
float16 *y,
Nd4jLong yStride,
float16 *result,
Nd4jLong resultStride,
float16 *extraParams, Nd4jLong n){
dim3 launchDims(512, 512, 2048);
functions::pairwise_transforms::PairWiseTransform<float16>::execudaCudaStrided(launchDims, extraPointers, opNum, dx, xStride, y, yStride, result, resultStride, extraParams, n);
}
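// Throughout this file launchDims packs the kernel launch configuration into
// a dim3: .x = grid size (blocks), .y = block size (threads), .z = dynamic
// shared memory in bytes, consumed as
// <<<launchDims.x, launchDims.y, launchDims.z, *stream>>> in the launches below.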
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
* @param xIndexes
* @param yIndexes
* @param resultIndexes
*/
void NativeOps::execPairwiseTransformFloat(
Nd4jPointer *extraPointers,
int opNum,
float *dx,
Nd4jLong *xShapeInfo,
float *y,
Nd4jLong *yShapeInfo,
float *result,
Nd4jLong *resultShapeInfo,
float *extraParams,
Nd4jLong *xIndexes,
Nd4jLong *yIndexes,
Nd4jLong *resultIndexes){
    // index-based pairwise transform: not implemented on the CUDA backend
}
void NativeOps::execPairwiseTransformHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *dx,
Nd4jLong *xShapeInfo,
float16 *y,
Nd4jLong *yShapeInfo,
float16 *result,
Nd4jLong *resultShapeInfo,
float16 *extraParams,
Nd4jLong *xIndexes,
Nd4jLong *yIndexes,
Nd4jLong *resultIndexes){
    // index-based pairwise transform: not implemented on the CUDA backend
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execPairwiseTransformFloat(
Nd4jPointer *extraPointers,
int opNum,
float *dx,
Nd4jLong *xShapeInfo,
float *y,
Nd4jLong *yShapeInfo,
float *result,
Nd4jLong *resultShapeInfo,
float *extraParams){
dim3 launchDims(512, 512, 2048);
    functions::pairwise_transforms::PairWiseTransform<float>::execudaCudaShaped(launchDims, extraPointers, opNum, dx, xShapeInfo, y, yShapeInfo, result, resultShapeInfo, extraParams);
}
void NativeOps::execPairwiseTransformHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *dx,
Nd4jLong *xShapeInfo,
float16 *y,
Nd4jLong *yShapeInfo,
float16 *result,
Nd4jLong *resultShapeInfo,
float16 *extraParams){
dim3 launchDims(512, 512, 2048);
    functions::pairwise_transforms::PairWiseTransform<float16>::execudaCudaShaped(launchDims, extraPointers, opNum, dx, xShapeInfo, y, yShapeInfo, result, resultShapeInfo, extraParams);
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduceFloat(
Nd4jPointer *extraPointers,
int opNum,
float *x,
Nd4jLong *xShapeInfo,
float *extraParams,
float *result,
Nd4jLong *resultShapeInfo) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F7 opNum:[%i]\n", opNum);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[8], 1, sizeof(float), 1);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF7 opNum:[%i]\n", opNum);
if (opNum == 19) {
execReduceFloat(extraPointers, 3, x, xShapeInfo, extraParams, result, resultShapeInfo);
}
    // this macro builds a bunch of IF/ELSE selectors for the kernel launch
//DISPATCH_SIMPLE(reduceScalarSimple, float, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, nullptr,1 , reductionPointer, deviceTADShapeInfo), OPS_A(REDUCE_OPS))
functions::reduce::ReduceFunction<float>::execReduceScalar(launchDims, stream, opNum, x, xShapeInfo, extraParams, result, resultShapeInfo, nullptr,1 , reductionPointer, deviceTADShapeInfo);
nd4j::DebugHelper::checkErrorCode(stream, "execReduceFloat(...) failed");
}
void NativeOps::execReduceHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
Nd4jLong *xShapeInfo,
float16 *extraParams,
float16 *result,
Nd4jLong *resultShapeInfo) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H7 opNum:[%i]\n", opNum);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[8], 1, sizeof(float16), 1);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AH7 opNum:[%i]\n", opNum);
if (opNum == 19) {
execReduceHalf(extraPointers, 3, x, xShapeInfo, extraParams, result, resultShapeInfo);
}
    // this macro builds a bunch of IF/ELSE selectors for the kernel launch
//DISPATCH_SIMPLE(reduceScalarSimple, float16, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, nullptr,1 , reductionPointer, deviceTADShapeInfo), OPS_A(REDUCE_OPS))
functions::reduce::ReduceFunction<float16>::execReduceScalar(launchDims, stream, opNum, x, xShapeInfo, extraParams, result, resultShapeInfo, nullptr,1 , reductionPointer, deviceTADShapeInfo);
nd4j::DebugHelper::checkErrorCode(stream, "execReduceHalf(...) failed");
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduceFloat(
Nd4jPointer *extraPointers,
int opNum,
float *x,
Nd4jLong *xShapeInfo,
float *extraParams,
float *result,
Nd4jLong *resultShapeInfo,
int *dimension,int dimensionLength){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F8 opNum:[%i]\n", opNum);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
// dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[8], dimensionLength, sizeof(float), 1);
if (opNum == 19) {
execReduceFloat(extraPointers, 3, x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength);
}
if (dimensionLength == 1) {
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[32], dimensionLength, sizeof(double), 2);
functions::reduce::ReduceFunction<float>::execReduceXD(launchDims, stream, opNum, 1, x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
} else if (shape::rank(hostTADShapeInfo) <= 3) {
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[33], dimensionLength, sizeof(double), 2);
functions::reduce::ReduceFunction<float>::execReduceXD(launchDims, stream, opNum, shape::rank(hostTADShapeInfo), x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
} else {
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[22], dimensionLength, sizeof(double), 2);
functions::reduce::ReduceFunction<float>::execReduceXD(launchDims, stream, opNum, shape::rank(hostTADShapeInfo), x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
}
}
void NativeOps::execReduceHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
Nd4jLong *xShapeInfo,
float16 *extraParams,
float16 *result,
Nd4jLong *resultShapeInfo,
int *dimension,int dimensionLength){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H8 opNum:[%i]\n", opNum);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[8], dimensionLength, sizeof(float16), 1);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AH8 opNum:[%i]\n", opNum);
if (opNum == 19) {
execReduceHalf(extraPointers, 3, x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength);
}
if (dimensionLength == 1) {
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[32], dimensionLength, sizeof(double), 2);
functions::reduce::ReduceFunction<float16>::execReduceXD(launchDims, stream, opNum, 1, x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
} else if (shape::rank(hostTADShapeInfo) <= 3) {
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[33], dimensionLength, sizeof(double), 2);
functions::reduce::ReduceFunction<float16>::execReduceXD(launchDims, stream, opNum, shape::rank(hostTADShapeInfo), x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
} else {
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[22], dimensionLength, sizeof(double), 2);
functions::reduce::ReduceFunction<float16>::execReduceXD(launchDims, stream, opNum, shape::rank(hostTADShapeInfo), x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
}
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @return
*/
float NativeOps::execReduceScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
float *x,
Nd4jLong *xShapeInfo,
float *extraParams){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F9 opNum:[%i]\n", opNum);
float *resultPointer = reinterpret_cast<float *>(extraPointers[5]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 8, funcAttributes[8]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF9 opNum:[%i]\n", opNum);
    // for the LogExpSum op we need to know the max value, and store it
if (opNum == 19) {
float tmp = execReduceScalarFloat(extraPointers, 3, x, xShapeInfo, extraParams);
extraParams = resultPointer;
    }
    // this macro builds a bunch of IF/ELSE selectors for the kernel launch
//DISPATCH_SIMPLE(reduceScalarSimple, float, PARAMS(x, xShapeInfo, extraParams, resultPointer, nullptr, nullptr,1 , reductionPointer, deviceTADShapeInfo), OPS_A(REDUCE_OPS))
functions::reduce::ReduceFunction<float>::execReduceScalar(launchDims, stream, opNum, x, xShapeInfo, extraParams, resultPointer, nullptr, nullptr,1 , reductionPointer, deviceTADShapeInfo);
// blocking this one
nd4j::DebugHelper::checkErrorCode(stream, "execReduceScalarFloat(...) failed");
float result = resultPointer[0];
return result;
}
float NativeOps::execReduceScalarHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
Nd4jLong *xShapeInfo,
float16 *extraParams){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H9 opNum:[%i]\n", opNum);
float16 *resultPointer = reinterpret_cast<float16 *>(extraPointers[5]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 2, funcAttributes[8]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AH9 opNum:[%i]\n", opNum);
    // for the LogExpSum op we need to know the max value, and store it
if (opNum == 19) {
float tmp = execReduceScalarHalf(extraPointers, 3, x, xShapeInfo, extraParams);
extraParams = resultPointer;
    }
    // this macro builds a bunch of IF/ELSE selectors for the kernel launch
//DISPATCH_SIMPLE(reduceScalarSimple, float16, PARAMS(x, xShapeInfo, extraParams, resultPointer, nullptr, nullptr,1 , reductionPointer, deviceTADShapeInfo), OPS_A(REDUCE_OPS))
functions::reduce::ReduceFunction<float16>::execReduceScalar(launchDims, stream, opNum, x, xShapeInfo, extraParams, resultPointer, nullptr, nullptr,1 , reductionPointer, deviceTADShapeInfo);
// blocking call
nd4j::DebugHelper::checkErrorCode(stream, "execReduceScalarHalf(...) failed");
float result = (float) resultPointer[0];
return result;
}
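// Both scalar reduce paths above write their result into the host-visible
// buffer at extraPointers[5] (presumably pinned/mapped memory); the
// checkErrorCode() call blocks on the stream, so reading resultPointer[0]
// on the host afterwards is safe.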
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduce3Float(
Nd4jPointer *extraPointers,
int opNum,
float *x,
Nd4jLong *xShapeInfo,
float *extraParams,
float *y,
Nd4jLong *yShapeInfo,
float *result,
Nd4jLong *resultShapeInfo){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
auto yDeviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[12]);
auto yDeviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[13]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F10 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[7]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF10 opNum:[%i]\n", opNum);
reduce3ScalarFloat<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
x,
xShapeInfo,
y,
yShapeInfo,
extraParams,
result,
resultShapeInfo,
nullptr,
1,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets);
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::execReduce3Half(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
Nd4jLong *xShapeInfo,
float16 *extraParams,
float16 *y,
Nd4jLong *yShapeInfo,
float16 *result,
Nd4jLong *resultShapeInfo){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
auto yDeviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[12]);
auto yDeviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[13]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H10 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 8, funcAttributes[7]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AH10 opNum:[%i]\n", opNum);
reduce3ScalarHalf<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
x,
xShapeInfo,
y,
yShapeInfo,
extraParams,
result,
resultShapeInfo,
nullptr,
1,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets);
DEBUG_KERNEL(stream, opNum);
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
*/
float NativeOps::execReduce3ScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
float *x,
Nd4jLong *xShapeInfo,
float *extraParams,
float *y,
Nd4jLong *yShapeInfo) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
auto yDeviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[12]);
auto yDeviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[13]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F11 opNum:[%i]\n", opNum);
float *resultPointer = reinterpret_cast<float *>(extraPointers[5]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 32, funcAttributes[7]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF11 opNum:[%i]\n", opNum);
reduce3ScalarFloat<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
x,
xShapeInfo,
y,
yShapeInfo,
extraParams,
resultPointer,
nullptr,
nullptr,
1,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets);
// blocking call
nd4j::DebugHelper::checkErrorCode(stream, "execReduce3ScalarFloat(...) failed");
float result = resultPointer[0];
return result;
}
float NativeOps::execReduce3ScalarHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
Nd4jLong *xShapeInfo,
float16 *extraParams,
float16 *y,
Nd4jLong *yShapeInfo) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
auto yDeviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[12]);
auto yDeviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[13]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H11 opNum:[%i]\n", opNum);
float16 *resultPointer = reinterpret_cast<float16 *>(extraPointers[5]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[7]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AH11 opNum:[%i]\n", opNum);
reduce3ScalarHalf<<<launchDims.x,launchDims.y,launchDims.z + 2048, *stream>>>(
opNum,
x,
xShapeInfo,
y,
yShapeInfo,
extraParams,
resultPointer,
nullptr,
nullptr,
1,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets);
// blocking call
nd4j::DebugHelper::checkErrorCode(stream, "execReduce3ScalarHalf(...) failed");
float result = (float) resultPointer[0];
return result;
}
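// reduce3 ops reduce a *pair* of operands into a single result (in libnd4j
// these are ops such as dot product, cosine similarity and euclidean
// distance), which is why both the X and Y TAD shape info/offsets are
// threaded through every call below.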
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execReduce3Float(
Nd4jPointer *extraPointers,
int opNum,
float *x,
Nd4jLong *xShapeInfo,
float *extraParams,
float *y,
Nd4jLong *yShapeInfo,
float *result,
Nd4jLong *resultShapeInfo,
int *dimension,
int dimensionLength){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
auto yDeviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[12]);
auto yDeviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[13]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F12 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[7]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF12 opNum:[%i]\n", opNum);
if (shape::isScalar(hostZShapeInfo) || dimension == nullptr) {
        reduce3ScalarFloat<<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(
opNum,
x,
xShapeInfo,
y,
yShapeInfo,
extraParams,
result,
resultShapeInfo,
dimension,
dimensionLength,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets);
} else {
        reduce3Float<<<1, launchDims.y, launchDims.z, *stream>>>(
opNum,
x,
xShapeInfo,
y,
yShapeInfo,
extraParams,
result,
resultShapeInfo,
dimension,
dimensionLength,
1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets);
}
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::execReduce3Half(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
Nd4jLong *xShapeInfo,
float16 *extraParams,
float16 *y,
Nd4jLong *yShapeInfo,
float16 *result,
Nd4jLong *resultShapeInfo,
int *dimension,
int dimensionLength){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
auto yDeviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[12]);
auto yDeviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[13]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H12 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 8, funcAttributes[7]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AH12 opNum:[%i]\n", opNum);
if (shape::isScalar(hostZShapeInfo) || dimension == nullptr) {
        reduce3ScalarHalf<<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(
opNum,
x,
xShapeInfo,
y,
yShapeInfo,
extraParams,
result,
resultShapeInfo,
dimension,
dimensionLength,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets);
} else {
        reduce3Half<<<1, launchDims.y, launchDims.z, *stream>>>(
opNum,
x,
xShapeInfo,
y,
yShapeInfo,
extraParams,
result,
resultShapeInfo,
dimension,
dimensionLength,
1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets);
}
DEBUG_KERNEL(stream, opNum);
}
/**
*
* @param opNum
* @param x
* @param xStride
* @param result
* @param resultStride
* @param scalar
* @param extraParams
* @param n
*/
void NativeOps::execScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
float *x,
Nd4jLong xStride,
float *result,
Nd4jLong resultStride,
float scalar,
float *extraParams,
Nd4jLong n){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[6]);
    // ScalarTransform dispatches the appropriate kernel for this op number
functions::scalar::ScalarTransform<float>::executeCudaStrided(launchDims, extraPointers, opNum, x, xStride, result, resultStride, scalar, extraParams, n);
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::execScalarHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
Nd4jLong xStride,
float16 *result,
Nd4jLong resultStride,
float scalar,
float16 *extraParams,
Nd4jLong n){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[6]);
    // this macro builds a bunch of IF/ELSE selectors for the kernel launch
//DISPATCH_SIMPLE(scalarSimpleStrided, float16, PARAMS(n, scalar, x, xStride, extraParams, result, resultStride, allocPointer), OPS_A(SCALAR_OPS))
float16 sc = (float16) scalar;
functions::scalar::ScalarTransform<float16>::executeCudaStrided(launchDims, extraPointers, opNum, x, xStride, result, resultStride, sc, extraParams, n);
DEBUG_KERNEL(stream, opNum);
}
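// Conceptually the strided scalar transform computes
//   result[i * resultStride] = op(x[i * xStride], scalar)   for i in [0, n);
// note the half variant receives the scalar as float and narrows it to
// float16 on the host before launching.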
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param scalar
* @param extraParams
* @param n
*/
void NativeOps::execScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
float *x,
Nd4jLong *xShapeInfo,
float *result,
Nd4jLong *resultShapeInfo,
float scalar,
float *extraParams){
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
Nd4jLong n = shape::length(hostXShapeInfo);
// if (nd4j::Environment::getInstance()->isDebugAndVerbose())
// printf("F14 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[5], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[5]);
//if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
// printf("AF14 opNum:[%i], xLength:[%i]\n", opNum, shape::length(hostXShapeInfo));
    // this macro builds a bunch of IF/ELSE selectors for the kernel launch
//DISPATCH_SIMPLE(scalarSimpleShaped, float, PARAMS(scalar, x, xShapeInfo, extraParams, result, resultShapeInfo, allocPointer), OPS_A(SCALAR_OPS))
functions::scalar::ScalarTransform<float>::executeCudaShaped(launchDims, extraPointers, opNum, x, xShapeInfo, result, resultShapeInfo, scalar, extraParams);
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::execScalarHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
Nd4jLong *xShapeInfo,
float16 *result,
Nd4jLong *resultShapeInfo,
float scalarF,
float16 *extraParams){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
auto n = shape::length(hostXShapeInfo);
//if (nd4j::Environment::getInstance()->isDebugAndVerbose())
// printf("H14 opNum:[%i]\n", opNum);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[5]);
float16 scalar = (float16) scalarF;
//if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
// printf("AH14 opNum:[%i], xLength:[%i]\n", opNum, shape::length(hostXShapeInfo));
    // this macro builds a bunch of IF/ELSE selectors for the kernel launch
//DISPATCH_SIMPLE(scalarSimpleShaped, float16, PARAMS(scalar, x, xShapeInfo, extraParams, result, resultShapeInfo, allocPointer), OPS_A(SCALAR_OPS))
functions::scalar::ScalarTransform<float16>::executeCudaShaped(launchDims, extraPointers, opNum, x, xShapeInfo, result, resultShapeInfo, scalar, extraParams);
DEBUG_KERNEL(stream, opNum);
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param scalar
* @param extraParams
* @param n
* @param xIndexes
* @param resultIndexes
*/
void NativeOps::execScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
float *x,
Nd4jLong *xShapeInfo,
float *result,
Nd4jLong *resultShapeInfo,
float scalar,
float *extraParams,
Nd4jLong *xIndexes,
Nd4jLong *resultIndexes){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto n = shape::length(hostXShapeInfo);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F15 opNum:[%i]\n", opNum);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[4]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF15 opNum:[%i]\n", opNum);
DEBUG_KERNEL(stream, opNum);
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
*/
float NativeOps::execSummaryStatsScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
float *x,
Nd4jLong *xShapeInfo,
float *extraParams,bool biasCorrected){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
float *resultPointer = reinterpret_cast<float *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], 1, sizeof(float), 8);
// we limit grid size for SummaryStats calls
launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x);
return functions::summarystats::SummaryStatsReduce<float>::execSummaryStatsReduceScalar(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, biasCorrected);
}
float NativeOps::execSummaryStatsScalarHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
Nd4jLong *xShapeInfo,
float16 *extraParams,bool biasCorrected){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
float16 *resultPointer = reinterpret_cast<float16 *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
Nd4jLong *deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], 1, sizeof(float16), 8);
launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x);
return (float) functions::summarystats::SummaryStatsReduce<float16>::execSummaryStatsReduceScalar(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, biasCorrected);
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execSummaryStatsFloat(
Nd4jPointer *extraPointers,
int opNum,
float *x,
Nd4jLong *xShapeInfo,
float *extraParams,
float *result,
Nd4jLong *resultShapeInfo,bool biasCorrected){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], 1, sizeof(float), 8);
// limiting number of blocks in grid, to match buffer memory size
launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x);
functions::summarystats::SummaryStatsReduce<float>::execSummaryStatsReduce(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, result, resultShapeInfo, biasCorrected);
}
void NativeOps::execSummaryStatsHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
Nd4jLong *xShapeInfo,
float16 *extraParams,
float16 *result,
Nd4jLong *resultShapeInfo,bool biasCorrected){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], 1, sizeof(float16), 8);
    // as everywhere else, we limit the maximal number of blocks for SummaryStats calls
launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x);
functions::summarystats::SummaryStatsReduce<float16>::execSummaryStatsReduce(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, result, resultShapeInfo, biasCorrected);
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execSummaryStatsFloat(
Nd4jPointer *extraPointers,
int opNum,
float *x,
Nd4jLong *xShapeInfo,
float *extraParams,
float *result,
Nd4jLong *resultShapeInfo,
int *dimension,
int dimensionLength,bool biasCorrected){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], dimensionLength, sizeof(float), 8);
    // as everywhere else, we limit the maximal number of blocks for SummaryStats calls
launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x);
functions::summarystats::SummaryStatsReduce<float>::execSummaryStatsReduce(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, biasCorrected);
}
void NativeOps::execSummaryStatsHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
Nd4jLong *xShapeInfo,
float16 *extraParams,
float16 *result,
Nd4jLong *resultShapeInfo,
int *dimension,
int dimensionLength,
bool biasCorrected) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto deviceTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], dimensionLength, sizeof(float16), 8);
    // as everywhere else, we limit the maximal number of blocks for SummaryStats calls
launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x);
functions::summarystats::SummaryStatsReduce<float16>::execSummaryStatsReduce(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, biasCorrected);
}
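// biasCorrected toggles between population and sample statistics: e.g. for
// variance the accumulated sum of squared deviations is conventionally
// divided by n when biasCorrected == false and by (n - 1) when it is true.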
/**
*
* @param opNum
* @param dx
* @param xStride
* @param result
* @param resultStride
* @param extraParams
* @param n
*/
void NativeOps::execTransformFloat(
Nd4jPointer *extraPointers,
int opNum,
float *dx,
Nd4jLong xStride,
float *z,
Nd4jLong zStride,
float *extraParams,
Nd4jLong n) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F19 opNum:[%i]\n", opNum);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[2]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF19 opNum:[%i], xLength: [%i]\n", opNum, shape::length(hostXShapeInfo));
functions::transform::Transform<float>::executeTransformStrided(launchDims, stream, opNum, n, dx, xStride, extraParams, z, zStride, allocPointer, reductionPointer);
}
void NativeOps::execTransformHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *dx,
Nd4jLong xStride,
float16 *z,
Nd4jLong zStride,
float16 *extraParams,
Nd4jLong n) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H19 opNum:[%i]\n", opNum);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[2]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AH19 opNum:[%i], xLength: [%i]\n", opNum, shape::length(hostXShapeInfo));
functions::transform::Transform<float16>::executeTransformStrided(launchDims, stream, opNum, n, dx, xStride, extraParams, z, zStride, allocPointer, reductionPointer);
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execTransformFloat(Nd4jPointer *extraPointers,int opNum,
float *dx,
Nd4jLong *xShapeInfo,
float *result,
Nd4jLong *resultShapeInfo,
float *extraParams) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostYShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[7]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F20 opNum:[%i]\n", opNum);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
    // pointer into the special device buffer reserved for these special-cased ops
float *specialPointer = reinterpret_cast<float *>(extraPointers[6]);
int *dimension = reinterpret_cast<int *>(specialPointer);
int *maxDimension = dimension + 1;
auto maxShapeBuffer = reinterpret_cast<Nd4jLong *>(maxDimension + 1);
float * special = reinterpret_cast<float *> (maxShapeBuffer + (MAX_RANK * 2 + 4));
int *maskedAllocPointer = allocPointer;
auto devTadShapeInfo = reinterpret_cast<Nd4jLong *> (extraPointers[10]);
Nd4jLong *devTadOffsets = reinterpret_cast<Nd4jLong *> (extraPointers[11]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[1]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF20 opNum:[%i]\n", opNum);
    // special handling for ops that internally reduce into a scalar:
    // SoftMax, SoftMaxDerivative, LogSoftMax, IsMax
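    // The blockwise branch below decomposes these ops into primitives:
    //   softmax(x)    = exp(x - max(x)) / sum(exp(x - max(x)))
    //   logsoftmax(x) = (x - max(x)) - log(sum(exp(x - max(x))))
    // i.e. reduce op 3 = max, broadcast op 1 = subtract, transform op 3 = exp,
    // reduce op 1 = sum, broadcast op 3 = divide, then transform op 5 (log)
    // for LogSoftMax or op 42 for SoftMaxDerivative. Subtracting the max
    // keeps exp() from overflowing.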
if (opNum >= 38 && opNum <= 41) {
if (shape::isVector(hostXShapeInfo) && opNum != 41) {
// if that's vector, we just go directly to op in 1 block
int length = shape::length(hostXShapeInfo);
int block = nd4j::math::nd4j_min<int>(length, 256);
launchDims.x = 1;
launchDims.y = block;
launchDims.z += (block * sizeof(float) * 4);
functions::transform::Transform<float>::executeTransformShaped(launchDims, stream, opNum, dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), allocPointer, reductionPointer, devTadShapeInfo, devTadOffsets);
} else {
// going for blockwise specials
auto shape = shape::shapeOf(hostXShapeInfo);
switch (opNum) {
case 40: // LogSoftMax
case 39: // SoftMax Derivative
case 38: {// softmax
Nd4jPointer tempPointers[16];
tempPointers[0] = extraPointers[0];
tempPointers[1] = extraPointers[1];
tempPointers[2] = extraPointers[2];
tempPointers[3] = extraPointers[3];
tempPointers[4] = extraPointers[4];
tempPointers[5] = extraPointers[5];
tempPointers[6] = extraPointers[6];
tempPointers[7] = extraPointers[7];
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
tempPointers[12] = extraPointers[12];
tempPointers[13] = extraPointers[13];
tempPointers[14] = extraPointers[14];
tempPointers[15] = extraPointers[15];
Nd4jLong maxShape[2] = {shape::shapeOf(hostXShapeInfo)[0], 1};
auto hostMaxShapeBuffer = shape::shapeBuffer(2, maxShape);
tempPointers[7] = (Nd4jPointer) hostMaxShapeBuffer;
tempPointers[8] = (Nd4jPointer) hostMaxShapeBuffer;
prepareShapeBuffer <<< 1, 1, 128, *stream >>> (dimension, maxDimension, maxShapeBuffer, shape[0]);
DEBUG_KERNEL(stream, opNum);
//shape::printShapeInfo(maxShapeBuffer);
tempPointers[9] = extraPointers[12];
tempPointers[10] = extraPointers[13];
tempPointers[11] = extraPointers[14];
// max 3
execReduceFloat(tempPointers, 3, dx, xShapeInfo, extraParams, special,
maxShapeBuffer, maxDimension, 1);
DEBUG_KERNEL(stream, opNum);
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
tempPointers[12] = extraPointers[10];
tempPointers[13] = extraPointers[11];
// sub 1
execBroadcastFloat(tempPointers, 1, dx, xShapeInfo, special,
maxShapeBuffer, result, resultShapeInfo, dimension, 1);
DEBUG_KERNEL(stream, opNum);
// exp 3
execTransformFloat(extraPointers, 3, result, resultShapeInfo, result, resultShapeInfo, extraParams);
DEBUG_KERNEL(stream, opNum);
tempPointers[8] = tempPointers[7];
tempPointers[9] = extraPointers[12];
tempPointers[10] = extraPointers[13];
tempPointers[11] = extraPointers[14];
//sum 1
execReduceFloat(tempPointers, 1, result, resultShapeInfo, extraParams, special,
maxShapeBuffer, maxDimension, 1);
DEBUG_KERNEL(stream, opNum);
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
tempPointers[12] = extraPointers[10];
tempPointers[13] = extraPointers[11];
// divide 3
execBroadcastFloat(tempPointers, 3, result, resultShapeInfo, special,
maxShapeBuffer, result, resultShapeInfo, dimension, 1);
DEBUG_KERNEL(stream, opNum);
// log 3
if (opNum == 40)
execTransformFloat(extraPointers, 5, result, resultShapeInfo, result, resultShapeInfo, extraParams);
else if (opNum == 39)
execTransformFloat(extraPointers, 42, result, resultShapeInfo, result, resultShapeInfo, extraParams);
nd4j::DebugHelper::checkErrorCode(stream, "SoftMaxFloat(...) failed");
delete hostMaxShapeBuffer;
break;
}
case 41: {
// IsMax along all dimensions
bool scalarCheat = false;
if (extraParams == nullptr) {
scalarCheat = true;
}
if (scalarCheat) {
// if that's 1D input - we'll just go for single dim IMax op call + filler
int maxIdx = (int) execIndexReduceScalarFloat(extraPointers, 0, dx, xShapeInfo, extraParams);
int targetIdx = 0;
    if (shape::order(hostXShapeInfo) == 'c' || (shape::order(hostXShapeInfo) == 'f' && maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1] >= shape::length(hostXShapeInfo)))
targetIdx = maxIdx;
else
targetIdx = maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1];
fillIsMaxFloat<<< 1, 128, 1536, *stream >>>(result, shape::length(hostXShapeInfo), targetIdx);
nd4j::DebugHelper::checkErrorCode(stream, "Legacy IsMax(...) failed");
} else {
// going for dimension-based IsMax
auto tadMaxShapeInfo = reinterpret_cast<Nd4jLong *> (extraPointers[10]);
auto tadMaxOffsets = reinterpret_cast<Nd4jLong *> (extraPointers[11]);
auto dimension = reinterpret_cast<int *> (extraPointers[15]);
special = reinterpret_cast<float *>(extraPointers[17]);
int dimensionLength = getDeviceId(extraPointers[18]);
// we call for IMax on specified dimension
execIndexReduceFloat(extraPointers, 0, dx, xShapeInfo, extraParams, special, hostYShapeInfo, dimension, dimensionLength);
DEBUG_KERNEL(stream, opNum);
// at this point, all IMax indexes are gathered, and we execute
fillDimensionalIsMaxFloat<<<blockLimit, 64, funcAttributes[36].sharedSizeBytes, *stream>>>(special, hostYShapeInfo, result, resultShapeInfo, tadMaxShapeInfo, dimension, dimensionLength, tadMaxOffsets );
nd4j::DebugHelper::checkErrorCode(stream, "Legacy IsMax(...) failed");
}
break;
}
default: {
printf("Bad case for transformFloat\n");
break;
}
}
}
} else {
// we're enforcing larger grids for Col2Im & Im2Col
// TODO: for high-end gpus we might use higher values here
if (opNum == 37 || opNum == 36 || opNum == 71) {
launchDims.x = 512;
launchDims.y = 512;
launchDims.z += 512 * sizeof(float);
} else if (opNum == 70) {
// we'll be using shared memory to speed up reverse
launchDims.z += launchDims.y * sizeof(float);
}
    // the histogram op requires an additional memory chunk :(
if (opNum == 48) {
int length = shape::length(hostZShapeInfo);
cudaMalloc(reinterpret_cast<void **>(&maskedAllocPointer), length * launchDims.x * sizeof(float));
}
if (opNum == 71) {
launchDims.z += 512 * sizeof(float);
}
/*
DISPATCH_SIMPLE(transformShaped, float,
PARAMS(dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo,
shape::rank(hostZShapeInfo), maskedAllocPointer, reductionPointer, devTadShapeInfo, devTadOffsets), OPS_A(TRANSFORM_OPS))
*/
functions::transform::Transform<float>::executeTransformShaped(launchDims, stream, opNum, dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), maskedAllocPointer, reductionPointer, devTadShapeInfo, devTadOffsets);
// we need guaranteed sync here, due to temp memory release
if (opNum == 48)
nd4j::DebugHelper::checkErrorCode(stream, "Legacy HistogramFloat(...) failed");
// release memory chunk
if (opNum == 48) {
cudaFree(reinterpret_cast<void *>(maskedAllocPointer));
}
}
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::execTransformHalf(Nd4jPointer *extraPointers,int opNum,
float16 *dx,
Nd4jLong *xShapeInfo,
float16 *result,
Nd4jLong *resultShapeInfo,
float16 *extraParams) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostYShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[7]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H20 opNum:[%i]\n", opNum);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
int *maskedAllocPointer = allocPointer;
float16 *specialPointer = reinterpret_cast<float16 *>(extraPointers[6]);
int *dimension = reinterpret_cast<int *>(specialPointer);
int *maxDimension = dimension + 1;
auto maxShapeBuffer = reinterpret_cast<Nd4jLong *>(maxDimension + 1);
float16 * special = reinterpret_cast<float16 *>(maxShapeBuffer + (MAX_RANK * 2 + 4));
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[1]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AH20 opNum:[%i]\n", opNum);
auto devTadShapeInfo = reinterpret_cast<Nd4jLong *> (extraPointers[10]);
auto devTadOffsets = reinterpret_cast<Nd4jLong *> (extraPointers[11]);
    // special handling for ops that internally reduce into a scalar:
// SoftMax, SoftMaxDerivative, LogSoftMax, IsMax
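    // the float16 path mirrors the float decomposition above
    // (max -> subtract -> exp -> sum -> divide), with an extra transform
    // (op 47) on the LogSoftMax branch of the fp16 pipeline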
if (opNum >= 38 && opNum <= 41) {
if (shape::isVector(hostXShapeInfo) && opNum != 41) {
// if that's vector, we just go directly to op in 1 block
auto length = shape::length(hostXShapeInfo);
auto block = nd4j::math::nd4j_min<Nd4jLong>(length, 256);
launchDims.x = 1;
launchDims.y = block;
launchDims.z += (block * sizeof(float16) * 4);
functions::transform::Transform<float16>::executeTransformShaped(launchDims, stream, opNum, dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), allocPointer, reductionPointer, devTadShapeInfo, devTadOffsets);
} else {
// going for blockwise specials
auto shape = shape::shapeOf(hostXShapeInfo);
switch (opNum) {
case 40: // LogSoftMax
case 39: // SoftMax Derivative
case 38: {// softmax
Nd4jPointer tempPointers[16];
tempPointers[0] = extraPointers[0];
tempPointers[1] = extraPointers[1];
tempPointers[2] = extraPointers[2];
tempPointers[3] = extraPointers[3];
tempPointers[4] = extraPointers[4];
tempPointers[5] = extraPointers[5];
tempPointers[6] = extraPointers[6];
tempPointers[7] = extraPointers[7];
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
tempPointers[12] = extraPointers[12];
tempPointers[13] = extraPointers[13];
tempPointers[14] = extraPointers[14];
tempPointers[15] = extraPointers[15];
Nd4jLong maxShape[2] = {shape::shapeOf(hostXShapeInfo)[0], 1};
auto hostMaxShapeBuffer = shape::shapeBuffer(2, maxShape);
tempPointers[7] = (Nd4jPointer) hostMaxShapeBuffer;
tempPointers[8] = (Nd4jPointer) hostMaxShapeBuffer;
// FIXME: fix this
prepareShapeBuffer <<< 1, 1, 128, *stream >>> (dimension, maxDimension, maxShapeBuffer, shape[0]);
DEBUG_KERNEL(stream, opNum);
//shape::printShapeInfo(maxShapeBuffer);
tempPointers[9] = extraPointers[12];
tempPointers[10] = extraPointers[13];
tempPointers[11] = extraPointers[14];
// max 3
execReduceHalf(tempPointers, 3, dx, xShapeInfo, extraParams, special,
maxShapeBuffer, maxDimension, 1);
DEBUG_KERNEL(stream, opNum);
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
tempPointers[12] = extraPointers[10];
tempPointers[13] = extraPointers[11];
// sub 1
execBroadcastHalf(tempPointers, 1, dx, xShapeInfo, special,
maxShapeBuffer, result, resultShapeInfo, dimension, 1);
DEBUG_KERNEL(stream, opNum);
// exp 3
execTransformHalf(extraPointers, 3, result, resultShapeInfo, result, resultShapeInfo, extraParams);
DEBUG_KERNEL(stream, opNum);
tempPointers[8] = tempPointers[7];
tempPointers[9] = extraPointers[12];
tempPointers[10] = extraPointers[13];
tempPointers[11] = extraPointers[14];
//sum 1
execReduceHalf(tempPointers, 1, result, resultShapeInfo, extraParams, special,
maxShapeBuffer, maxDimension, 1);
DEBUG_KERNEL(stream, opNum);
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
tempPointers[12] = extraPointers[10];
tempPointers[13] = extraPointers[11];
// divide 3
execBroadcastHalf(tempPointers, 3, result, resultShapeInfo, special,
maxShapeBuffer, result, resultShapeInfo, dimension, 1);
if (opNum == 40) {
DEBUG_KERNEL(stream, opNum);
execTransformHalf(tempPointers, 47, result, resultShapeInfo, result, resultShapeInfo, extraParams);
}
DEBUG_KERNEL(stream, opNum);
// log 3
if (opNum == 40)
execTransformHalf(extraPointers, 5, result, resultShapeInfo, result, resultShapeInfo, extraParams);
else if (opNum == 39)
execTransformHalf(extraPointers, 42, result, resultShapeInfo, result, resultShapeInfo, extraParams);
nd4j::DebugHelper::checkErrorCode(stream, "Legacy SoftMaxHalf(...) failed");
delete[] hostMaxShapeBuffer;
break;
}
case 41: {
// IsMax along all dimensions
bool scalarCheat = false;
if (extraParams == nullptr) {
scalarCheat = true;
}
if (scalarCheat) {
// 1D input, aka vector
int maxIdx = (int) execIndexReduceScalarHalf(extraPointers, 0, dx, xShapeInfo, extraParams);
int targetIdx = 0;
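// map the winning linear index (from the scalar IndexReduce above) to a
// buffer offset: for a contiguous 'c'-order vector the linear index already
// is the offset, otherwise we scale by the last stride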
if (shape::order(hostXShapeInfo) == 'c' || (shape::order(hostXShapeInfo) == 'f' && maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1] >= shape::length(hostXShapeInfo)))
targetIdx = maxIdx;
else
targetIdx = maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1];
fillIsMaxHalf<<< 1, 128, 1536, *stream >>>(result, shape::length(hostXShapeInfo), targetIdx);
} else {
// going for dimension-based IsMax
auto tadMaxShapeInfo = reinterpret_cast<Nd4jLong *> (extraPointers[10]);
auto tadMaxOffsets = reinterpret_cast<Nd4jLong *> (extraPointers[11]);
int *dimension = reinterpret_cast<int *> (extraPointers[15]);
special = reinterpret_cast<float16 *>(extraPointers[17]);
int dimensionLength = getDeviceId(extraPointers[18]);
// we call for IMax on specified dimension
execIndexReduceHalf(extraPointers, 0, dx, xShapeInfo, extraParams, special, hostYShapeInfo, dimension, dimensionLength);
DEBUG_KERNEL(stream, opNum);
// at this point, all IMax indexes are gathered, and we execute
fillDimensionalIsMaxHalf<<<blockLimit, 64, funcAttributes[36].sharedSizeBytes, *stream>>>(special, hostYShapeInfo, result, resultShapeInfo, tadMaxShapeInfo, dimension, dimensionLength, tadMaxOffsets );
nd4j::DebugHelper::checkErrorCode(stream, "Legacy IsMaxHalf(...) failed");
}
break;
}
default: {
printf("Bad case for transformHalf\n");
break;
}
}
}
} else {
// Im2Col & Col2Im enforced grids
if (opNum == 37 || opNum == 36 || opNum == 71) {
launchDims.x = 512;
launchDims.y = 512;
launchDims.z += 512 * sizeof(float16);
} else if (opNum == 70) {
// we'll be using shared memory to speed up reverse
launchDims.z += launchDims.y * sizeof(float16);
}
// Histogram op requires additional memory chunk
if (opNum == 48) {
int length = shape::length(hostZShapeInfo);
cudaMalloc(reinterpret_cast<void **>(&maskedAllocPointer), length * launchDims.x * sizeof(float16));
}
if (opNum == 71) {
launchDims.z += 512 * sizeof(float16);
}
functions::transform::Transform<float16>::executeTransformShaped(launchDims, stream, opNum, dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), maskedAllocPointer, reductionPointer, devTadShapeInfo, devTadOffsets);
// we need guaranteed sync here, due to temp memory release
if (opNum == 48)
nd4j::DebugHelper::checkErrorCode(stream, "Legacy HistogramHalf(...) failed");
// release that histogram memory chunk
if (opNum == 48) {
cudaFree(reinterpret_cast<void *>(maskedAllocPointer));
}
}
DEBUG_KERNEL(stream, opNum);
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execTransformFloat(
Nd4jPointer *extraPointers,
int opNum,
float *dx,
Nd4jLong *xShapeInfo,
float *result,
Nd4jLong *resultShapeInfo,
float *extraParams,
Nd4jLong *xIndexes,
Nd4jLong *resultIndexes) {
///
}
void NativeOps::execTransformHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *dx,
Nd4jLong *xShapeInfo,
float16 *result,
Nd4jLong *resultShapeInfo,
float16 *extraParams,
Nd4jLong *xIndexes,
Nd4jLong *resultIndexes) {
///
}
template <typename T>
__device__ void flattenKernelGeneric(int dOffset,
char order,
T *result,
Nd4jLong *resultShapeInfo,
T *input,
Nd4jLong *inputShapeInfo, int *allocationPointer) {
__shared__ UnifiedSharedMemory *manager;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
manager = new(shmem) UnifiedSharedMemory(reinterpret_cast<int *>(shmem));
manager->init(sizeof(UnifiedSharedMemory), 4, 4, sizeof(shape::TAD), 2);
}
__syncthreads();
Nd4jLong tid = blockIdx.x * blockDim.x + threadIdx.x;
auto zShape = shape::shapeOf(resultShapeInfo);
auto zStride = shape::stride(resultShapeInfo);
auto yShape = shape::shapeOf(inputShapeInfo);
auto yStride = shape::stride(inputShapeInfo);
auto yOrder = shape::order(inputShapeInfo);
auto len = shape::length(inputShapeInfo);
auto resultEWS = shape::elementWiseStride(resultShapeInfo);
auto inputEWS = shape::elementWiseStride(inputShapeInfo);
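// Fast path: when orders match and both buffers expose a positive
// element-wise stride, a plain grid-stride loop over strided indices
// suffices; otherwise we fall back to per-element coordinate arithmetic
// (ind2sub/ind2subC + getOffset) below.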
if (yOrder == order) {
if (resultEWS >= 1 && inputEWS >= 1) {
for (int i = tid; i < len; i+= gridDim.x * blockDim.x) {
result[i * resultEWS + dOffset] = input[i * inputEWS];
}
} else {
auto rank = shape::rank(inputShapeInfo);
Nd4jLong coord[MAX_RANK];
if(order == 'f') {
for(auto i = tid; i < len; i+= gridDim.x * blockDim.x) {
shape::ind2sub(rank,yShape,i,coord);
auto offset = shape::getOffset(0,yShape,yStride,coord,rank);
result[i + dOffset] = input[offset];
}
}
else {
for(auto i = tid; i < len; i+= gridDim.x * blockDim.x) {
shape::ind2subC(rank,yShape,i,coord);
auto offset = shape::getOffset(0,yShape,yStride,coord,rank);
result[i + dOffset] = input[offset];
}
}
}
} else {
int rank = shape::rank(inputShapeInfo);
Nd4jLong coord[MAX_RANK];
if(order == 'f') {
for(int i = tid; i < len; i+= gridDim.x * blockDim.x) {
shape::ind2sub(rank,yShape,i,coord);
auto offset = shape::getOffset(0,yShape,yStride,coord,rank);
result[i+dOffset] = input[offset];
}
}
else {
for(int i = tid; i < len; i+= gridDim.x * blockDim.x) {
shape::ind2subC(rank,yShape,i,coord);
auto offset = shape::getOffset(0,yShape,yStride,coord,rank);
result[i+dOffset] = input[offset];
}
}
}
}
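// The extern "C" wrappers below give each type specialization of
// flattenKernelGeneric a stable, unmangled symbol that the host-side
// NativeOps::flatten* entry points can launch directly.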
extern "C" __global__ void flattenKernelDouble(int offset,
char order,
double *result,
Nd4jLong *resultShapeInfo,
double *input,
Nd4jLong *inputShapeInfo, int *allocationPointer) {
flattenKernelGeneric<double>(
offset,
order, result,
resultShapeInfo,
input,
inputShapeInfo,
allocationPointer);
}
extern "C" __global__ void flattenKernelFloat(int offset,
char order,
float *result,
Nd4jLong *resultShapeInfo,
float *input,
Nd4jLong *inputShapeInfo, int *allocationPointer) {
flattenKernelGeneric<float>(
offset,
order,
result,
resultShapeInfo,
input,
inputShapeInfo,
allocationPointer);
}
extern "C" __global__ void flattenKernelHalf(int offset,
char order,
float16 *result,
Nd4jLong *resultShapeInfo,
float16 *input,
Nd4jLong *inputShapeInfo, int *allocationPointer) {
flattenKernelGeneric<float16>(
offset,
order,
result,
resultShapeInfo,
input,
inputShapeInfo,
allocationPointer);
}
/**
* Append an input array
* to the end of a flat array
* in a particular order
* @param offset the offset of the array to start at
* @param order the order
* @param result the result array
* @param resultShapeInfo the shape info for the array
* @param input the input for the array
* @param inputShapeInfo the shape information for that array
*/
void NativeOps::flattenFloat(
Nd4jPointer *extraPointers,
int offset,
char order,
float *result,
Nd4jLong *resultShapeInfo,
float *input,
Nd4jLong *inputShapeInfo) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostYShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[7]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F22 opNum:[7]\n");
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostYShapeInfo), 2, funcAttributes[30]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF222 opNum:[7]\n");
flattenKernelFloat<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>(offset, order, result, resultShapeInfo, input, inputShapeInfo, allocPointer);
DEBUG_KERNEL(stream, -1);
}
void NativeOps::flattenHalf(
Nd4jPointer *extraPointers,
int offset,
char order,
float16 *result,
Nd4jLong *resultShapeInfo,
float16 *input,
Nd4jLong *inputShapeInfo) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostYShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[7]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H22 opNum:[7]\n");
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostYShapeInfo), 2, funcAttributes[30]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AH222 opNum:[7]\n");
flattenKernelHalf<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>(offset, order, result, resultShapeInfo, input, inputShapeInfo, allocPointer);
DEBUG_KERNEL(stream, -1);
}
/**
* Append an input array
* to the end of a flat array
* in a particular order
* @param offset the offset of the array to start at
* @param order the order
* @param result the result array
* @param resultShapeInfo the shape info for the array
* @param input the input for the array
* @param inputShapeInfo the shape information for that array
*/
void NativeOps::flattenDouble(
Nd4jPointer *extraPointers,
int offset,
char order,
double *result,
Nd4jLong *resultShapeInfo,
double *input,
Nd4jLong *inputShapeInfo) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostYShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[7]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D30 opNum:[7]\n");
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostYShapeInfo), 2, funcAttributes[34]);
flattenKernelDouble<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>(offset, order, result, resultShapeInfo, input, inputShapeInfo, allocPointer);
DEBUG_KERNEL(stream, -1);
}
void NativeOps::checkP2P() {
int curDevice = 0;
cudaGetDevice(&curDevice);
int devCnt = 0;
cudaGetDeviceCount(&devCnt);
if (curDevice < 0 || curDevice >= devCnt)
curDevice = 0;
bool tempSupport = true;
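// P2P is reported as supported only when every ordered pair of devices can
// access each other; a single failing pair disables it globally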
if (devCnt > 1) {
for (int x = 0; x < devCnt; x++) {
for (int y = 0; y < devCnt; y++) {
if (x == y)
continue;
int canAccess = 0;
cudaSetDevice(x);
cudaDeviceCanAccessPeer(&canAccess, x , y);
if (!canAccess) {
tempSupport = false;
break;
}
}
}
supportedP2P = tempSupport;
cudaSetDevice(curDevice);
} else {
// if we have only 1 device - we say that we support P2P, since all data will be on 1 device
supportedP2P = true;
}
}
void NativeOps::enableP2P(bool enable) {
if (enable == allowedP2P)
return;
int curDevice = 0;
cudaGetDevice(&curDevice);
int devCnt = 0;
cudaGetDeviceCount(&devCnt);
if (curDevice < 0 || curDevice >= devCnt)
curDevice = 0;
if (devCnt > 1) {
for (int x = 0; x < devCnt; x++) {
for (int y = 0; y < devCnt; y++) {
if (x == y)
continue;
int canAccess = 0;
cudaSetDevice(x);
cudaDeviceCanAccessPeer(&canAccess, x , y);
if (canAccess) {
if (enable) {
cudaDeviceEnablePeerAccess(y, 0);
} else {
cudaDeviceDisablePeerAccess(y);
}
} else {
if (nd4j::Environment::getInstance()->isVerbose()) printf("Peer access [%i] -> [%i] isn't possible\n", x, y);
}
}
}
cudaSetDevice(curDevice);
}
allowedP2P = enable;
cudaSetDevice(curDevice);
}
bool NativeOps::isP2PAvailable() {
return supportedP2P;
}
void NativeOps::initializeDevicesAndFunctions() {
int devCnt = 0;
cudaGetDeviceCount(&devCnt);
deviceProperties = new cudaDeviceProp[devCnt];
for (int i = 0; i < devCnt; i++) {
cudaSetDevice(i);
cudaGetDeviceProperties(&deviceProperties[i], i);
cudaDeviceSetLimit(cudaLimitStackSize, 4096);
}
cudaSetDevice(0);
checkP2P();
// enabling p2p gpu access if it's supported
if (supportedP2P && devCnt > 1)
enableP2P(allowedP2P);
//cudaFuncGetAttributes(&funcAttributes[0], (void *)transformFloatIndexes);
//void (*transformFloatPointer1)(int opNum, float *dy,int *shapeInfo, int xRank, float *params, float *result,int *resultShapeInfo, int zRank, int *allocationPointer, float *reductionPointer) = transformFloat;
// FIXME
//cudaFuncGetAttributes(&funcAttributes[1], transformFloatIndexes);
//void (*transformFloatPointer2)(int opNum, Nd4jLong n, float *dy, int incy, float *params, float *result,int resultStride, int *allocationPointer, float *reductionPointer) = transformFloat;
// FIXME
//cudaFuncGetAttributes(&funcAttributes[2], transformFloatIndexes);
//cudaFuncGetAttributes(&funcAttributes[3], (void *)functions::summarystats::summaryStatsReduceFloat);
//cudaFuncGetAttributes(&funcAttributes[4], (void *)scalarFloatIndexes);
// void (*scalarFloatPointer1)(int opNum, float dx,float *dy, int *shapeInfo, int xRank, float *params, float *result,int *resultShapeInfo, int zRank, int *allocPointer) = scalarFloat;
// cudaFuncGetAttributes(&funcAttributes[5], scalarFloatIndexes);
// void (*scalarFloatPointer2)(int opNum, Nd4jLong n,float dx, float *dy, int incy, float *params, float *result,int resultStride, int *allocPointer) = scalarFloat;
// cudaFuncGetAttributes(&funcAttributes[6], scalarFloatIndexes);
cudaFuncGetAttributes(&funcAttributes[7], reduce3Float);
cudaFuncGetAttributes(&funcAttributes[8], reduce3Float);
// printf("reduceFloat regs: [%i], static shmem: [%i]\n", funcAttributes[8].numRegs, funcAttributes[8].sharedSizeBytes);
cudaFuncGetAttributes(&funcAttributes[28], reduce3Float); // 1D
// printf("reduceFloat1D regs: [%i], static shmem: [%i]\n", funcAttributes[28].numRegs, funcAttributes[28].sharedSizeBytes);
cudaFuncGetAttributes(&funcAttributes[29], reduce3Float); // 6D
// printf("reduceFloat6D regs: [%i], static shmem: [%i]\n", funcAttributes[29].numRegs, funcAttributes[29].sharedSizeBytes);
cudaFuncGetAttributes(&funcAttributes[30], flattenKernelFloat);
cudaFuncGetAttributes(&funcAttributes[31], concatKernelFloat);
// cudaFuncGetAttributes(&funcAttributes[9], pairWiseTransformFloat);
// cudaFuncGetAttributes(&funcAttributes[10], pairWiseTransformFloatIndex);
// cudaFuncGetAttributes(&funcAttributes[11], pairWiseTransformStridedFloat);
cudaFuncGetAttributes(&funcAttributes[12], reduce3Float);
cudaFuncGetAttributes(&funcAttributes[13], reduce3Float);
///////////////////////////////////////// Doubles are separate, just in case of...
//cudaFuncGetAttributes(&funcAttributes[14], transformDoubleIndexes);
// void (*transformDoublePointer1)(int opNum, double *dy, int *shapeInfo, int xRank, double *params, double *result,int *resultShapeInfo, int zRank, int *allocationPointer, double *reductionPointer) = transformDouble;
// FIXME
//cudaFuncGetAttributes(&funcAttributes[15], transformDoubleIndexes);
//void (*transformDoublePointer2)(int opNum, Nd4jLong n, double *dy, int incy, double *params, double *result,int resultStride, int *allocationPointer, double *reductionPointer) = transformDouble;
// FIXME
//cudaFuncGetAttributes(&funcAttributes[16], transformDoubleIndexes);
//cudaFuncGetAttributes(&funcAttributes[17], functions::summarystats::summaryStatsReduceDouble);
// cudaFuncGetAttributes(&funcAttributes[18], scalarDoubleIndexes);
//void (*scalarDoublePointer1)(int opNum, double dx,double *dy, int *shapeInfo, int xRank, double *params, double *result,int *resultShapeInfo, int zRank, int *allocPointer) = scalarDouble;
// cudaFuncGetAttributes(&funcAttributes[19], scalarDoubleIndexes);
//void (*scalarDoublePointer2)(int opNum, Nd4jLong n,double dx, double *dy, int incy, double *params, double *result,int resultStride, int *allocPointer) = scalarDouble;
// cudaFuncGetAttributes(&funcAttributes[20], scalarDoubleIndexes);
cudaFuncGetAttributes(&funcAttributes[21], reduce3Double);
cudaFuncGetAttributes(&funcAttributes[22], reduce3Float);
// cudaFuncGetAttributes(&funcAttributes[23], pairWiseTransformDouble);
// cudaFuncGetAttributes(&funcAttributes[24], pairWiseTransformDoubleIndex);
// cudaFuncGetAttributes(&funcAttributes[25], pairWiseTransformStridedDouble);
cudaFuncGetAttributes(&funcAttributes[26], reduce3Double);
cudaFuncGetAttributes(&funcAttributes[27], reduce3Double);
cudaFuncGetAttributes(&funcAttributes[32], reduce3Float); // 1D
cudaFuncGetAttributes(&funcAttributes[33], reduce3Float); // 6D
cudaFuncGetAttributes(&funcAttributes[34], flattenKernelDouble);
cudaFuncGetAttributes(&funcAttributes[35], concatKernelDouble);
cudaFuncGetAttributes(&funcAttributes[36], fillDimensionalIsMaxFloat);
cudaFuncGetAttributes(&funcAttributes[37], fillDimensionalIsMaxDouble);
cudaFuncGetAttributes(&funcAttributes[38], concatKernelScalarFloat);
cudaFuncGetAttributes(&funcAttributes[39], concatKernelScalarDouble);
cudaFuncGetAttributes(&funcAttributes[40], concatKernelVStackFloat);
cudaFuncGetAttributes(&funcAttributes[41], concatKernelVStackDouble);
cudaFuncGetAttributes(&funcAttributes[42], concatKernelHStackFloat);
cudaFuncGetAttributes(&funcAttributes[43], concatKernelHStackDouble);
/////////////////////////
cudaFuncGetAttributes(&funcAttributes[44], averagingKernelHalf);
cudaFuncGetAttributes(&funcAttributes[45], averagingKernelFloat);
cudaFuncGetAttributes(&funcAttributes[46], averagingKernelDouble);
//
//cudaFuncGetAttributes(&funcAttributes[47], scalarAlongDimension_0_float);
//cudaFuncGetAttributes(&funcAttributes[48], scalarAlongDimension_0_float16);
//cudaFuncGetAttributes(&funcAttributes[48], scalarAlongDimension_0_double);
}
void NativeOps::initializeFunctions(Nd4jPointer *functions) {
nd4j::BlasHelper::getInstance()->initializeDeviceFunctions(functions);
/*
this->cublasSgemv = (CublasSgemv)functions[0];
this->cublasDgemv = (CublasDgemv)functions[1];
this->cublasHgemm = (CublasHgemm)functions[2];
this->cublasSgemm = (CublasSgemm)functions[3];
this->cublasDgemm = (CublasDgemm)functions[4];
this->cublasSgemmEx = (CublasSgemmEx)functions[5];
this->cublasHgemmBatched = (CublasHgemmBatched)functions[6];
this->cublasSgemmBatched = (CublasSgemmBatched)functions[7];
this->cublasDgemmBatched = (CublasDgemmBatched)functions[8];
*/
}
/**
* This method acquires a memory chunk of the requested size on the host side
*
* @param memorySize memory size, in bytes
* @param flags optional parameter
* @return pointer to the allocated chunk, or 0L on failure
*/
Nd4jPointer NativeOps::mallocHost(Nd4jLong memorySize, int flags) {
Nd4jPointer pointer;
// cudaHostAllocMapped |cudaHostAllocPortable
cudaError_t res = cudaHostAlloc(reinterpret_cast<void **>(&pointer), memorySize, cudaHostAllocDefault);
if (res != 0)
pointer = 0L;
return pointer;
}
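// Illustrative usage sketch for the host/device allocators (assumptions:
// `ops` is a NativeOps instance, `streamPtr` wraps a CUDA stream and
// `deviceIdPtr` wraps a device id; not part of the build):
//
//   Nd4jPointer host = ops.mallocHost(1024 * sizeof(float), 0); // pinned
//   Nd4jPointer dev = ops.mallocDevice(1024 * sizeof(float), deviceIdPtr, 0);
//   ops.memcpyAsync(dev, host, 1024 * sizeof(float), 1 /* H2D */, streamPtr);
//   ops.streamSynchronize(streamPtr);
//   ops.freeDevice(dev, deviceIdPtr);
//   ops.freeHost(host);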
/**
* This method acquires a memory chunk of the requested size on the specified device
*
* @param memorySize memory size, in bytes
* @param ptrToDeviceId pointer to deviceId. For cuda that's just an int, for OpenCL that's pointer to device_id, etc
* @param flags optional parameter
* @return pointer to the allocated chunk, or 0L on failure
*/
Nd4jPointer NativeOps::mallocDevice(Nd4jLong memorySize, Nd4jPointer ptrToDeviceId, int flags) {
Nd4jPointer pointer;
cudaError_t res = cudaMalloc(reinterpret_cast<void **>(&pointer), memorySize);
if (res != 0)
pointer = 0L;
return pointer;
}
/**
* This method releases previously allocated host memory space
*
* @param pointer pointer that'll be freed
*/
int NativeOps::freeHost(Nd4jPointer pointer) {
cudaError_t res = cudaFreeHost(reinterpret_cast<void *>(pointer));
if (res != 0)
pointer = 0L;
return 1L;
}
/**
* This method releases previously allocated memory space on device
*
* @param pointer pointer that'll be freed
* @param ptrToDeviceId pointer to deviceId.
*/
int NativeOps::freeDevice(Nd4jPointer pointer, Nd4jPointer ptrToDeviceId) {
cudaError_t res = cudaFree(reinterpret_cast<void *>(pointer));
if (res != 0)
pointer = 0L;
return 1L;
}
Nd4jPointer NativeOps::createContext() {
return 0L;
}
Nd4jPointer NativeOps::createStream() {
Nd4jPointer nativeStream = (Nd4jPointer) malloc(sizeof(cudaStream_t));
CHECK_ALLOC(nativeStream, "Failed to allocate memory for new CUDA stream");
cudaError_t result = cudaStreamCreate(reinterpret_cast<cudaStream_t *>(&nativeStream));
checkCudaErrors(result);
if (result != 0)
throw std::runtime_error("cudaStreamCreate(...) failed");
return nativeStream;
}
Nd4jPointer NativeOps::createEvent() {
Nd4jPointer nativeEvent= (Nd4jPointer) malloc(sizeof(cudaEvent_t));
CHECK_ALLOC(nativeEvent, "Failed to allocate new CUDA event buffer");
cudaError_t result = cudaEventCreateWithFlags(reinterpret_cast<cudaEvent_t *>(&nativeEvent), cudaEventDisableTiming);
checkCudaErrors(result);
if (result != 0)
throw std::runtime_error("cudaEventCreateWithFlags(...) failed");
return nativeEvent;
}
int NativeOps::registerEvent(Nd4jPointer event, Nd4jPointer stream) {
cudaEvent_t *pEvent = reinterpret_cast<cudaEvent_t *>(&event);
cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(&stream);
cudaError_t result = cudaEventRecord(*pEvent, *pStream);
checkCudaErrors(result);
if (result != 0)
throw std::runtime_error("cudaEventRecord(...) failed");
return 1;
}
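// Typical event lifecycle with these entry points (illustrative sketch,
// assuming `ops` is a NativeOps instance and `streamPtr` wraps a stream):
//
//   Nd4jPointer evt = ops.createEvent();
//   ops.registerEvent(evt, streamPtr); // record the event on the stream
//   ops.eventSynchronize(evt);         // block until recorded work is done
//   ops.destroyEvent(evt);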
int NativeOps::setDevice(Nd4jPointer ptrToDeviceId) {
int deviceId = getDeviceId(ptrToDeviceId);
cudaError_t result = cudaSetDevice(deviceId);
checkCudaErrors(result);
if (result != 0)
throw std::runtime_error("cudaSetDevice(...) failed");
return 1;
}
Nd4jLong NativeOps::getDeviceFreeMemory(Nd4jPointer ptrToDeviceId) {
int device = getDeviceId(ptrToDeviceId);
int orig = -1;
cudaGetDevice(&orig);
if (device >= 0 && device != orig) {
cudaSetDevice(device);
}
size_t memFree = 0;
size_t memTotal = 0;
cudaMemGetInfo(&memFree, &memTotal);
if (device >= 0 && device != orig) {
cudaSetDevice(orig);
}
return (Nd4jLong) memFree;
}
Nd4jLong NativeOps::getDeviceTotalMemory(Nd4jPointer ptrToDeviceId) {
int device = getDeviceId(ptrToDeviceId);
int orig = -1;
cudaGetDevice(&orig);
if (device >= 0 && device != orig) {
cudaSetDevice(device);
}
size_t memFree = 0;
size_t memTotal = 0;
cudaMemGetInfo(&memFree, &memTotal);
if (device >= 0 && device != orig) {
cudaSetDevice(orig);
}
return (Nd4jLong) memTotal;
}
int NativeOps::memcpy(Nd4jPointer dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) {
return memcpyAsync(dst, src, size, flags, reserved);
}
int NativeOps::memcpyAsync(Nd4jPointer dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) {
cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(&reserved);
cudaMemcpyKind kind;
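// the `flags` argument follows the convention used throughout this file:
// 0 = host->host, 1 = host->device, 2 = device->host, 3 = device->device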
DEBUG_KERNEL(pStream, 0);
switch (flags) {
case 0: {
kind = cudaMemcpyHostToHost;
}
break;
case 1: {
kind = cudaMemcpyHostToDevice;
}
break;
case 2: {
kind = cudaMemcpyDeviceToHost;
}
break;
case 3: {
kind = cudaMemcpyDeviceToDevice;
}
break;
default: {
printf("UNDEFINED MEMCPY!\n");
return 0;
}
}
cudaError_t result = cudaMemcpyAsync(reinterpret_cast<void *>(dst), const_cast<const void *>(reinterpret_cast<void *>(src)), static_cast<size_t>(size), kind, *pStream);
if (result != 0) {
checkCudaErrors(result);
printf("Failed on [%lu] -> [%lu], size: [%i], direction: [%i], result: [%i]\n", src, dst, size, flags, static_cast<int>(result));
fflush(stdout);
fflush(stderr);
throw std::runtime_error("cudaMemcpyAsync(...) failed");
//return 0L;
}
return 1;
}
int NativeOps::memset(Nd4jPointer dst, int value, Nd4jLong size, int flags, Nd4jPointer reserved) {
cudaError_t result = cudaMemset(reinterpret_cast<void *>(dst), value, static_cast<size_t>(size));
checkCudaErrors(result);
if (result != 0)
throw std::runtime_error("cudaMemset(...) failed");
return 1;
}
int NativeOps::memsetAsync(Nd4jPointer dst, int value, Nd4jLong size, int flags, Nd4jPointer reserved) {
cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(&reserved);
cudaError_t result = cudaMemsetAsync(reinterpret_cast<void *>(dst), value, static_cast<size_t>(size), *pStream);
checkCudaErrors(result);
if (result != 0)
throw std::runtime_error("cudaMemsetAsync(...) failed");
return 1;
}
int NativeOps::destroyEvent(Nd4jPointer event) {
cudaEvent_t *pEvent = reinterpret_cast<cudaEvent_t *>(&event);
cudaError_t result = cudaEventDestroy(*pEvent);
checkCudaErrors(result);
if (result != 0)
throw std::runtime_error("cudaEvenDestroy(...) failed");
return 1;
}
int NativeOps::streamSynchronize(Nd4jPointer stream) {
cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(&stream);
cudaError_t result = cudaStreamSynchronize(*pStream);
checkCudaErrors(result);
if (result != 0)
throw std::runtime_error("cudaStreamSynchronize(...) failed");
return 1L;
}
int NativeOps::eventSynchronize(Nd4jPointer event) {
cudaEvent_t *pEvent = reinterpret_cast<cudaEvent_t *>(&event);
cudaError_t result = cudaEventSynchronize(*pEvent);
checkCudaErrors(result);
if (result != 0)
throw std::runtime_error("cudaEventSynchronize(...) failed");
return 1L;
}
int NativeOps::getAvailableDevices() {
int devCnt = 0;
cudaGetDeviceCount(&devCnt);
return devCnt;
}
void NativeOps::enableDebugMode(bool reallyEnable) {
nd4j::Environment::getInstance()->setDebug(reallyEnable);
}
void NativeOps::setGridLimit(int gridSize) {
if (gridSize > 8192)
gridSize = 8192;
if (gridSize < 1)
gridSize = 1;
blockLimit = gridSize;
}
int NativeOps::ompGetMaxThreads() {
return maxThreads;
}
int NativeOps::ompGetNumThreads() {
return maxThreads;
}
void NativeOps::setOmpNumThreads(int threads) {
if (threads > 1024)
threads = 1024;
if (threads < 32)
threads = 32;
maxThreads = threads;
}
void NativeOps::enableVerboseMode(bool reallyEnable) {
nd4j::Environment::getInstance()->setVerbose(reallyEnable);
}
int NativeOps::getDeviceMajor(Nd4jPointer ptrToDeviceId) {
int device = getDeviceId(ptrToDeviceId);
return deviceProperties[device].major;
}
int NativeOps::getDeviceMinor(Nd4jPointer ptrToDeviceId) {
int device = getDeviceId(ptrToDeviceId);
return deviceProperties[device].minor;
}
const char * NativeOps::getDeviceName(Nd4jPointer ptrToDeviceId) {
int device = getDeviceId(ptrToDeviceId);
return deviceProperties[device].name;
}
/**
* Concatenate multiple arrays of the same shape together
* along a particular dimension
*/
void NativeOps::concatFloat(
Nd4jPointer *extraPointers,
int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfo,
float *result,
Nd4jLong *resultShapeInfo,
Nd4jPointer *tadPointers,
Nd4jPointer *offsetPointers) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostShapePointers = reinterpret_cast<Nd4jLong **>(extraPointers[9]);
// numArrays will be used as the number of TADs, so each block processes 1 input
int smem = 0;
bool isVstack = false;
bool isScalar = true;
bool isHstack = false;
for (int i = 0; i < numArrays; i++) {
if (!shape::isScalar(hostShapePointers[i])) {
isScalar = false;
break;
}
}
if (!isScalar && dimension == 0 && shape::rank(hostXShapeInfo) == 2 && shape::order(hostXShapeInfo) == 'c' ) {
isVstack = true;
for (int i = 0; i < numArrays; i++) {
if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0 ||
shape::order(hostShapePointers[i]) != 'c') {
isVstack = false;
break;
}
}
}
// let's try to fit N-dimensional vstack
if (!isVstack && !isScalar && dimension == 0 && shape::order(hostXShapeInfo) == 'c') {
Nd4jLong length0 = shape::length(hostShapePointers[0]);
isVstack = true;
for (int i = 0; i < numArrays; i++) {
if (shape::elementWiseStride(hostShapePointers[i]) <= 0 || shape::order(hostShapePointers[i]) != 'c' || length0 != shape::length(hostShapePointers[i])) {
isVstack = false;
break;
}
}
}
if (!isScalar && !isVstack && dimension == 1 && shape::isVector(hostXShapeInfo)) {
isHstack = true;
for (int i = 0; i < numArrays; i++) {
if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0) {
isHstack = false;
break;
}
}
}
if (isScalar) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going scalar concat\n");
smem = funcAttributes[38].sharedSizeBytes;
concatKernelScalarFloat<<< 128, 128, smem, *stream>>> (dimension, numArrays, reinterpret_cast<Nd4jPointer *>(data[0]), reinterpret_cast<Nd4jPointer *>(inputShapeInfo[0]), result, resultShapeInfo, reinterpret_cast<Nd4jPointer *>(tadPointers[0]), reinterpret_cast<Nd4jPointer *>(offsetPointers[0]));
} else if (isVstack) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going VStack concat\n");
smem = funcAttributes[40].sharedSizeBytes;
concatKernelVStackFloat<<< 128, 512, smem, *stream>>> (dimension, numArrays, reinterpret_cast<Nd4jPointer *>(data[0]), reinterpret_cast<Nd4jPointer *>(inputShapeInfo[0]), result, resultShapeInfo, reinterpret_cast<Nd4jPointer *>(tadPointers[0]), reinterpret_cast<Nd4jPointer *>(offsetPointers[0]));
} else if (isHstack) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going HStack concat\n");
smem = funcAttributes[42].sharedSizeBytes;
concatKernelHStackFloat<<< 128, 128, smem, *stream>>> (dimension, numArrays, reinterpret_cast<Nd4jPointer *>(data[0]), reinterpret_cast<Nd4jPointer *>(inputShapeInfo[0]), result, resultShapeInfo, reinterpret_cast<Nd4jPointer *>(tadPointers[0]), reinterpret_cast<Nd4jPointer *>(offsetPointers[0]));
} else {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going generic concat\n");
//smem = nd4j::math::nd4j_max<int>(funcAttributes[31].sharedSizeBytes + 768, 1280);
auto devZTadShape = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto devZOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
concatKernelFloat<<< 512, 512, 4096, *stream>>> (dimension, numArrays, reinterpret_cast<Nd4jPointer *>(data[0]), reinterpret_cast<Nd4jPointer *>(inputShapeInfo[0]), result, resultShapeInfo, reinterpret_cast<Nd4jPointer *>(tadPointers[0]), reinterpret_cast<Nd4jPointer *>(offsetPointers[0]), devZTadShape, devZOffsets);
}
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("sharedMemory requested for concatFloat: [%i], registers: [%i]\n", smem, funcAttributes[31].numRegs);
nd4j::DebugHelper::checkErrorCode(stream, "Legacy ConcatFloat(...) failed");
}
void NativeOps::concatHalf(
Nd4jPointer *extraPointers,
int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfo,
float16 *result,
Nd4jLong *resultShapeInfo,
Nd4jPointer *tadPointers,
Nd4jPointer *offsetPointers) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostShapePointers = reinterpret_cast<Nd4jLong **>(extraPointers[9]);
// numArrays will be used as the number of TADs, so each block processes 1 input
int smem = 0;
bool isVstack = false;
bool isScalar = true;
bool isHstack = false;
for (int i = 0; i < numArrays; i++) {
if (!shape::isScalar(hostShapePointers[i])) {
isScalar = false;
break;
}
}
if (!isScalar && dimension == 0 && shape::rank(hostXShapeInfo) == 2 && shape::order(hostXShapeInfo) == 'c' ) {
isVstack = true;
for (int i = 0; i < numArrays; i++) {
if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0 || shape::order(hostShapePointers[i]) != 'c') {
isVstack = false;
break;
}
}
}
// let's try to fit N-dimensional vstack
if (!isVstack && !isScalar && dimension == 0 && shape::order(hostXShapeInfo) == 'c') {
Nd4jLong length0 = shape::length(hostShapePointers[0]);
isVstack = true;
for (int i = 0; i < numArrays; i++) {
if (shape::elementWiseStride(hostShapePointers[i]) <= 0 || shape::order(hostShapePointers[i]) != 'c' || length0 != shape::length(hostShapePointers[i])) {
isVstack = false;
break;
}
}
}
if (!isScalar && !isVstack && dimension == 1 && shape::isVector(hostXShapeInfo)) {
isHstack = true;
for (int i = 0; i < numArrays; i++) {
if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0) {
isHstack = false;
break;
}
}
}
if (isScalar) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going scalar concat\n");
smem = funcAttributes[38].sharedSizeBytes;
concatKernelScalarHalf<<< 128, 128, smem, *stream>>> (dimension, numArrays, reinterpret_cast<Nd4jPointer *>(data[0]), reinterpret_cast<Nd4jPointer *>(inputShapeInfo[0]), result, resultShapeInfo, reinterpret_cast<Nd4jPointer *>(tadPointers[0]), reinterpret_cast<Nd4jPointer *>(offsetPointers[0]));
} else if (isVstack) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going VStack concat\n");
smem = funcAttributes[40].sharedSizeBytes;
concatKernelVStackHalf<<< 128, 128, smem, *stream>>> (dimension, numArrays, reinterpret_cast<Nd4jPointer *>(data[0]), reinterpret_cast<Nd4jPointer *>(inputShapeInfo[0]), result, resultShapeInfo, reinterpret_cast<Nd4jPointer *>(tadPointers[0]), reinterpret_cast<Nd4jPointer *>(offsetPointers[0]));
} else if (isHstack) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going HStack concat\n");
smem = funcAttributes[42].sharedSizeBytes;
concatKernelHStackHalf<<< 128, 128, smem, *stream>>> (dimension, numArrays, reinterpret_cast<Nd4jPointer *>(data[0]), reinterpret_cast<Nd4jPointer *>(inputShapeInfo[0]), result, resultShapeInfo, reinterpret_cast<Nd4jPointer *>(tadPointers[0]), reinterpret_cast<Nd4jPointer *>(offsetPointers[0]));
} else {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going generic concat\n");
//smem = nd4j::math::nd4j_max<int>(funcAttributes[31].sharedSizeBytes + 768, 1280);
auto devZTadShape = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto devZOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
concatKernelHalf<<< 512, 128, 4096, *stream>>> (dimension, numArrays, reinterpret_cast<Nd4jPointer *>(data[0]), reinterpret_cast<Nd4jPointer *>(inputShapeInfo[0]), result, resultShapeInfo, reinterpret_cast<Nd4jPointer *>(tadPointers[0]), reinterpret_cast<Nd4jPointer *>(offsetPointers[0]), devZTadShape, devZOffsets);
}
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("sharedMemory requested for concatHalf: [%i], registers: [%i]\n", smem, funcAttributes[31].numRegs);
nd4j::DebugHelper::checkErrorCode(stream, "ConcatHalf(...) failed");
}
void NativeOps::specialConcatFloat(
Nd4jPointer *extraPointers,
int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfo,
float *result,
Nd4jLong *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) {
nd4j::SpecialMethods<float>::concatCpuGeneric(
dimension,
numArrays,
data,
inputShapeInfo,
result,
resultShapeInfo);
}
void NativeOps::specialConcatHalf(
Nd4jPointer *extraPointers,
int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfo,
float16 *result,
Nd4jLong *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) {
nd4j::SpecialMethods<float16>::concatCpuGeneric(
dimension,
numArrays,
data,
inputShapeInfo,
result,
resultShapeInfo);
}
/**
* Concatenate multiple arrays of the same shape together
* along a particular dimension
*/
void NativeOps::specialConcatDouble(
Nd4jPointer *extraPointers,
int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfo,
double *result,
Nd4jLong *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) {
nd4j::SpecialMethods<double>::concatCpuGeneric(
dimension,
numArrays,
data,
inputShapeInfo,
result,
resultShapeInfo);
}
/**
* Concatenate multiple arrays of the same shape together
* along a particular dimension
*/
void NativeOps::concatDouble(
Nd4jPointer *extraPointers,
int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfo,
double *result,
Nd4jLong *resultShapeInfo,
Nd4jPointer *tadPointers,
Nd4jPointer *offsetPointers) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostShapePointers = reinterpret_cast<Nd4jLong **>(extraPointers[9]);
// numArrays will be used as the number of TADs, so each block processes 1 input
int smem = 0;
bool isVstack = false;
bool isScalar = true;
bool isHstack = false;
for (int i = 0; i < numArrays; i++) {
if (!shape::isScalar(hostShapePointers[i])) {
isScalar = false;
break;
}
}
if (!isScalar && dimension == 0 && shape::rank(hostXShapeInfo) == 2 && shape::order(hostXShapeInfo) == 'c' ) {
isVstack = true;
for (int i = 0; i < numArrays; i++) {
if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0 || shape::order(hostShapePointers[i]) != 'c') {
isVstack = false;
break;
}
}
}
// let's try to fit N-dimensional vstack
if (!isVstack && !isScalar && dimension == 0 && shape::order(hostXShapeInfo) == 'c') {
Nd4jLong length0 = shape::length(hostShapePointers[0]);
isVstack = true;
for (int i = 0; i < numArrays; i++) {
if (shape::elementWiseStride(hostShapePointers[i]) <= 0 || shape::order(hostShapePointers[i]) != 'c' || length0 != shape::length(hostShapePointers[i])) {
isVstack = false;
break;
}
}
}
if (!isScalar && !isVstack && dimension == 1 && shape::isVector(hostXShapeInfo)) {
isHstack = true;
for (int i = 0; i < numArrays; i++) {
if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0) {
isHstack = false;
break;
}
}
}
if (isScalar) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going scalar concat\n");
smem = funcAttributes[39].sharedSizeBytes;
concatKernelScalarDouble<<< 128, 128, smem, *stream>>> (dimension, numArrays, reinterpret_cast<Nd4jPointer *>(data[0]), reinterpret_cast<Nd4jPointer *>(inputShapeInfo[0]), result, resultShapeInfo, reinterpret_cast<Nd4jPointer *>(tadPointers[0]), reinterpret_cast<Nd4jPointer *>(offsetPointers[0]));
} else if (isVstack) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going VStack concat\n");
smem = funcAttributes[41].sharedSizeBytes;
concatKernelVStackDouble<<< 128, 128, smem, *stream>>> (dimension, numArrays, reinterpret_cast<Nd4jPointer *>(data[0]), reinterpret_cast<Nd4jPointer *>(inputShapeInfo[0]), result, resultShapeInfo, reinterpret_cast<Nd4jPointer *>(tadPointers[0]), reinterpret_cast<Nd4jPointer *>(offsetPointers[0]));
} else if (isHstack) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going HStack concat\n");
smem = funcAttributes[43].sharedSizeBytes;
concatKernelHStackDouble<<< 128, 128, smem, *stream>>> (dimension, numArrays, reinterpret_cast<Nd4jPointer *>(data[0]), reinterpret_cast<Nd4jPointer *>(inputShapeInfo[0]), result, resultShapeInfo, reinterpret_cast<Nd4jPointer *>(tadPointers[0]), reinterpret_cast<Nd4jPointer *>(offsetPointers[0]));
} else {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going generic concat\n");
auto devZTadShape = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto devZOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
concatKernelDouble<<< 512, 128, 4096, *stream>>> (dimension, numArrays, reinterpret_cast<Nd4jPointer *>(data[0]), reinterpret_cast<Nd4jPointer *>(inputShapeInfo[0]), result, resultShapeInfo, reinterpret_cast<Nd4jPointer *>(tadPointers[0]), reinterpret_cast<Nd4jPointer *>(offsetPointers[0]), devZTadShape, devZOffsets);
}
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("sharedMemory requested for concatDouble: [%i], registers: [%i]\n", smem, funcAttributes[31].numRegs);
nd4j::DebugHelper::checkErrorCode(stream, "ConcatDouble(...) failed");
}
/**
* This method calculates TAD-only shape info and offsets for the given
* dimensions and saves them into the provided target/offsets buffers
*/
void NativeOps::tadOnlyShapeInfo(Nd4jLong *xShapeInfo, int *dimension, int dimensionLength, Nd4jLong *target, Nd4jLong *offsets) {
shape::TAD tad;
tad.init(xShapeInfo, dimension, dimensionLength);
//tad->setOutputBuffer(target);
tad.createTadOnlyShapeInfo();
tad.createOffsets();
std::memcpy(reinterpret_cast<void *>(target), tad.tadOnlyShapeInfo, shape::shapeInfoByteLength(tad.tadOnlyShapeInfo));
std::memcpy(reinterpret_cast<void *>(offsets), tad.tadOffsets, tad.numTads * sizeof(Nd4jLong));
}
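// Worked example (illustrative): for a 2x3 'c'-order array with dimension
// {1}, each TAD is one row of length 3, so `target` receives the shape info
// of a length-3 vector and `offsets` receives {0, 3}.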
int NativeOps::memcpyConstantAsync(Nd4jLong dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) {
cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(&reserved);
cudaMemcpyKind kind;
DEBUG_KERNEL(pStream, -1);
switch (flags) {
case 0: {
kind = cudaMemcpyHostToHost;
}
break;
case 1: {
kind = cudaMemcpyHostToDevice;
}
break;
case 2: {
kind = cudaMemcpyDeviceToHost;
}
break;
case 3: {
kind = cudaMemcpyDeviceToDevice;
}
break;
}
//cudaError_t result = cudaMemcpyAsync((void *) dst, (const void *) src, (size_t) size, kind, *pStream);
cudaError_t result = cudaMemcpyToSymbolAsync(deviceConstantMemory, const_cast<const void *>(src), size, dst, kind, *pStream);
checkCudaErrors(result);
if (result != 0)
throw std::runtime_error("cudaMemcpyToSymbolAsync(...) failed");
return 1;
}
Nd4jPointer NativeOps::getConstantSpace() {
Nd4jPointer dConstAddr;
cudaError_t result = cudaGetSymbolAddress(reinterpret_cast<void **>(&dConstAddr), deviceConstantMemory);
if (result != 0)
throw std::runtime_error("cudaGetSymbolAddress(...) failed");
return dConstAddr;
}
void NativeOps::pullRowsHalf(Nd4jPointer *extraPointers, float16 *x, Nd4jLong *xShapeInfo, float16 *z, Nd4jLong *zShapeInfo, Nd4jLong n, Nd4jLong *indexes, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *zTadShapeInfo, Nd4jLong *zTadOffsets) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
pullRowsKernelHalf<<<64, 256, 1024, *stream>>>(x, xShapeInfo, z, zShapeInfo, n, indexes, tadShapeInfo, tadOffsets, zTadShapeInfo, zTadOffsets);
DEBUG_KERNEL(stream, -1);
}
void NativeOps::pullRowsFloat(Nd4jPointer *extraPointers, float *x, Nd4jLong *xShapeInfo, float *z, Nd4jLong *zShapeInfo, Nd4jLong n, Nd4jLong *indexes, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *zTadShapeInfo, Nd4jLong *zTadOffsets) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
pullRowsKernelFloat<<<64, 256, 1024, *stream>>>(x, xShapeInfo, z, zShapeInfo, n, indexes, tadShapeInfo, tadOffsets, zTadShapeInfo, zTadOffsets);
DEBUG_KERNEL(stream, -1);
}
void NativeOps::pullRowsDouble(Nd4jPointer *extraPointers, double *x, Nd4jLong *xShapeInfo, double *z, Nd4jLong *zShapeInfo, Nd4jLong n, Nd4jLong *indexes, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *zTadShapeInfo, Nd4jLong *zTadOffsets) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
pullRowsKernelDouble<<<64, 256, 1024, *stream>>>(x, xShapeInfo, z, zShapeInfo, n, indexes, tadShapeInfo, tadOffsets, zTadShapeInfo, zTadOffsets);
DEBUG_KERNEL(stream, -1);
}
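// The pullRows* entry points above gather `n` TADs (typically rows) from x
// into z: for each i, the TAD at indexes[i] in x is copied into the i-th TAD
// of z, with each side described by its own TAD shape info and offsets.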
void NativeOps::averageHalf(Nd4jPointer *extras, Nd4jPointer *dx, float16 *dz, int n, Nd4jLong length, bool propagate) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
int mode = getDeviceId(extras[3]);
float16 **x = reinterpret_cast<float16 **>(dx);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("averageHalf called\n");
// launching on gpu
if (mode == 0) {
dim3 launchDims = getBasicLaunchParams(getDeviceId(extras[2]), length, sizeof(float16), funcAttributes[44]);
averagingKernelHalf<<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, dz, n, length, propagate);
nd4j::DebugHelper::checkErrorCode(stream, "AverageHalf(...) failed");
} else {
nd4j::SpecialMethods<float16>::averageGeneric(x, dz, n, length, propagate);
}
}
void NativeOps::averageFloat(Nd4jPointer *extras, Nd4jPointer *dx, float *dz, int n, Nd4jLong length, bool propagate) {
cudaStream_t * stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
int mode = getDeviceId(extras[3]);
float **x = reinterpret_cast<float **>(dx);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("averageFloat called\n");
// launching on gpu
if (mode == 0) {
dim3 launchDims = getBasicLaunchParams(getDeviceId(extras[2]), length, sizeof(float), funcAttributes[45]);
averagingKernelFloat<<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, dz, n, length, propagate);
nd4j::DebugHelper::checkErrorCode(stream, "AverageFloat(...) failed");
} else {
// launching on host memory
nd4j::SpecialMethods<float>::averageGeneric(x, dz, n, length, propagate);
}
}
void NativeOps::averageDouble(Nd4jPointer *extras, Nd4jPointer *dx, double *dz, int n, Nd4jLong length, bool propagate) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
int mode = getDeviceId(extras[3]);
double **x = reinterpret_cast<double **>(dx);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("averageDouble called\n");
// launching on gpu
if (mode == 0) {
dim3 launchDims = getBasicLaunchParams(getDeviceId(extras[2]), length, sizeof(double), funcAttributes[46]);
averagingKernelDouble << < launchDims.x, launchDims.y, launchDims.z, *stream >> > (x, dz, n, length, propagate);
nd4j::DebugHelper::checkErrorCode(stream, "AverageDouble(...) failed");
} else {
nd4j::SpecialMethods<double>::averageGeneric(x, dz, n, length, propagate);
}
}
void NativeOps::accumulateHalf(Nd4jPointer *extras, Nd4jPointer *dx, float16 *dz, int n, Nd4jLong length) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
int mode = getDeviceId(extras[3]);
float16 **x = reinterpret_cast<float16 **>(dx);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("accumulateHalf called\n");
// launching on gpu
if (mode == 0) {
dim3 launchDims = getBasicLaunchParams(getDeviceId(extras[2]), length, sizeof(float16), funcAttributes[44]);
accumulateKernelHalf<<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, dz, n, length);
nd4j::DebugHelper::checkErrorCode(stream, "AccumulateHalf(...) failed");
} else {
nd4j::SpecialMethods<float16>::accumulateGeneric(x, dz, n, length);
}
}
void NativeOps::accumulateFloat(Nd4jPointer *extras, Nd4jPointer *dx, float *dz, int n, Nd4jLong length) {
cudaStream_t * stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
int mode = getDeviceId(extras[3]);
float **x = reinterpret_cast<float **>(dx);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("accumulateFloat called\n");
// launching on gpu
if (mode == 0) {
dim3 launchDims = getBasicLaunchParams(getDeviceId(extras[2]), length, sizeof(float), funcAttributes[45]);
accumulateKernelFloat<<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, dz, n, length);
nd4j::DebugHelper::checkErrorCode(stream, "AccumulateFloat(...) failed");
} else {
// launching on host memory
nd4j::SpecialMethods<float>::accumulateGeneric(x, dz, n, length);
}
}
void NativeOps::accumulateDouble(Nd4jPointer *extras, Nd4jPointer *dx, double *dz, int n, Nd4jLong length) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
int mode = getDeviceId(extras[3]);
double **x = reinterpret_cast<double **>(dx);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("accumulateDouble called\n");
// launching on gpu
if (mode == 0) {
dim3 launchDims = getBasicLaunchParams(getDeviceId(extras[2]), length, sizeof(double), funcAttributes[46]);
accumulateKernelDouble << < launchDims.x, launchDims.y, launchDims.z, *stream >> > (x, dz, n, length);
nd4j::DebugHelper::checkErrorCode(stream, "AccumulateDouble(...) failed");
} else {
nd4j::SpecialMethods<double>::accumulateGeneric(x, dz, n, length);
}
}
void NativeOps::shuffleDouble(Nd4jPointer *extras, Nd4jPointer *dx, Nd4jPointer *xShapeInfo, Nd4jPointer *dz, Nd4jPointer *zShapeInfo, int N, int *shuffleMap, Nd4jPointer *tadShapeInfo, Nd4jPointer *tadOffsets) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
double **x = reinterpret_cast<double **>(dx);
double **z = reinterpret_cast<double **>(dz);
auto xShape = reinterpret_cast<Nd4jLong **>(xShapeInfo);
auto zShape = reinterpret_cast<Nd4jLong **>(zShapeInfo);
auto tadOnlyShapeInfo = reinterpret_cast<Nd4jLong **>(tadShapeInfo);
auto tadOffset = reinterpret_cast<Nd4jLong **>(tadOffsets);
shuffleKernelDouble<<<32, 128, 2048, *stream>>>(x, xShape, z, zShape, N, shuffleMap, tadOnlyShapeInfo, tadOffset);
DEBUG_KERNEL(stream, 0);
}
void NativeOps::shuffleFloat(Nd4jPointer *extras, Nd4jPointer *dx, Nd4jPointer *xShapeInfo, Nd4jPointer *dz, Nd4jPointer *zShapeInfo, int N, int *shuffleMap, Nd4jPointer *tadShapeInfo, Nd4jPointer *tadOffsets) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
float **x = reinterpret_cast<float **>(dx);
float **z = reinterpret_cast<float **>(dz);
auto xShape = reinterpret_cast<Nd4jLong **>(xShapeInfo);
auto zShape = reinterpret_cast<Nd4jLong **>(zShapeInfo);
auto tadOnlyShapeInfo = reinterpret_cast<Nd4jLong **>(tadShapeInfo);
auto tadOffset = reinterpret_cast<Nd4jLong **>(tadOffsets);
shuffleKernelFloat<<<32, 128, 2048, *stream>>>(x, xShape, z, zShape, N, shuffleMap, tadOnlyShapeInfo, tadOffset);
DEBUG_KERNEL(stream, 0);
}
void NativeOps::shuffleHalf(Nd4jPointer *extras, Nd4jPointer *dx, Nd4jPointer *xShapeInfo, Nd4jPointer *dz, Nd4jPointer *zShapeInfo, int N, int *shuffleMap, Nd4jPointer *tadShapeInfo, Nd4jPointer *tadOffsets) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
float16 **x = reinterpret_cast<float16 **>(dx);
float16 **z = reinterpret_cast<float16 **>(dz);
auto xShape = reinterpret_cast<Nd4jLong **>(xShapeInfo);
auto zShape = reinterpret_cast<Nd4jLong **>(zShapeInfo);
auto tadOnlyShapeInfo = reinterpret_cast<Nd4jLong **>(tadShapeInfo);
auto tadOffset = reinterpret_cast<Nd4jLong **>(tadOffsets);
shuffleKernelHalf<<<32, 128, 2048, *stream>>>(x, xShape, z, zShape, N, shuffleMap, tadOnlyShapeInfo, tadOffset);
DEBUG_KERNEL(stream, 0);
}
void NativeOps::execMetaPredicateStridedFloat(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, Nd4jLong N, float *dx, Nd4jLong xStride, float *dy, Nd4jLong yStride, float *dz, Nd4jLong zStride, float *extraA, float *extraB, float scalarA, float scalarB) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
functions::grid::GRIDStrided<float>::execMetaPredicateStrided(stream, extras, opTypeA, opNumA, opTypeB, opNumB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalarA, scalarB);
DEBUG_KERNEL(stream, opNumA);
}
void NativeOps::execMetaPredicateStridedDouble(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, Nd4jLong N, double *dx, Nd4jLong xStride, double *dy, Nd4jLong yStride, double *dz, Nd4jLong zStride, double *extraA, double *extraB, double scalarA, double scalarB) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
functions::grid::GRIDStrided<double>::execMetaPredicateStrided(stream, extras, opTypeA, opNumA, opTypeB, opNumB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalarA, scalarB);
DEBUG_KERNEL(stream, opNumA);
}
void NativeOps::execMetaPredicateStridedHalf(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, Nd4jLong N, float16 *dx, Nd4jLong xStride, float16 *dy, Nd4jLong yStride, float16 *dz, Nd4jLong zStride, float16 *extraA, float16 *extraB, float scalarA, float scalarB) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
// convert the float scalars to fp16 before handing them to the kernel
float16 scalA = (float16) scalarA;
float16 scalB = (float16) scalarB;
functions::grid::GRIDStrided<float16>::execMetaPredicateStrided(stream, extras, opTypeA, opNumA, opTypeB, opNumB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalA, scalB);
DEBUG_KERNEL(stream, opNumA);
}
void NativeOps::execMetaPredicateReduceFloat(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, float *dx, Nd4jLong *xShapeInfo, float *dy, Nd4jLong *yShapeInfo, float *dz, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, float *extraA, float *extraB, float scalarA, float scalarB, bool scalarReturned) {
// no-op
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
/*
metaPredicateReduceFloat(const int opTypeA, const int opNumA, const int opTypeB, const int opNumB,
float *dx, int *xShapeInfo, float *dy, int *yShapeInfo, float *dz, int *zShapeInfo, int *tadShapeInfo, int *tadOffsets, float *reductionBuffer, float *extraA, float *extraB, float scalarA, float scalarB) {
*/
// metaPredicateReduceFloat<<<256, 256, 1024, *stream>>>(opTypeA, opNumA, opTypeB, opNumB, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, nullptr, extraA, extraB, scalarA, scalarB, scalarReturned);
DEBUG_KERNEL(stream, opNumA);
}
void NativeOps::execMetaPredicateShapeDouble(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, Nd4jLong N, double *dx, Nd4jLong *xShapeInfo, double *dy, Nd4jLong *yShapeInfo, double *dz, Nd4jLong *zShapeInfo, double *extraA, double *extraB, double scalarA, double scalarB) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
functions::grid::GRIDShaped<double>::execMetaPredicateShaped(stream, extras, opTypeA, opNumA, opTypeB, opNumB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB);
DEBUG_KERNEL(stream, opNumA);
}
void NativeOps::execMetaPredicateShapeHalf(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, Nd4jLong N, float16 *dx, Nd4jLong *xShapeInfo, float16 *dy, Nd4jLong *yShapeInfo, float16 *dz, Nd4jLong *zShapeInfo, float16 *extraA, float16 *extraB, float scalarA, float scalarB) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
// we have to convert the float scalars to fp16 prior to the kernel call
float16 scalA = (float16) scalarA;
float16 scalB = (float16) scalarB;
functions::grid::GRIDShaped<float16>::execMetaPredicateShaped(stream, extras, opTypeA, opNumA, opTypeB, opNumB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalA, scalB);
DEBUG_KERNEL(stream, opNumA);
}
void NativeOps::execMetaPredicateShapeFloat(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, Nd4jLong N, float *dx, Nd4jLong *xShapeInfo, float *dy, Nd4jLong *yShapeInfo, float *dz, Nd4jLong *zShapeInfo, float *extraA, float *extraB, float scalarA, float scalarB) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
functions::grid::GRIDShaped<float>::execMetaPredicateShaped(stream, extras, opTypeA, opNumA, opTypeB, opNumB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB);
DEBUG_KERNEL(stream, opNumA);
}
bool NativeOps::isExperimentalEnabled() {
return experimentalSupport;
}
void NativeOps::setOmpMinThreads(int threads) {
minThreads = nd4j::math::nd4j_max<int>(32, threads);
minThreads = nd4j::math::nd4j_min<int>(maxThreads, minThreads);
}
int NativeOps::getDevice() {
int curDevice = -1;
cudaGetDevice(&curDevice);
return curDevice;
}
void NativeOps::setElementThreshold(int num) {
// this is no-op for CUDA
}
void NativeOps::setTADThreshold(int num) {
// this is no-op for CUDA
}
void NativeOps::execScalarFloat(Nd4jPointer *extraPointers,int opNum,
float *x,
Nd4jLong *xShapeInfo,
float *z,
Nd4jLong *zShapeInfo,
float *scalars,
float *extraParams,
int *dimension,
int dimensionLength) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostTadShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
//dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]),hostXShapeInfo, hostTadShapeInfo, funcAttributes[47] ,dimensionLength, sizeof(float), 0);
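// note the dim3 convention used across this file: .x is the grid size,
// .y the block size, and .z the shared-memory size in bytes that becomes
// the third kernel launch parameter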
dim3 launchDims = dim3(256, 256, 1024);
functions::scalar::ScalarTransform<float>::executeCudaAlongDimension(launchDims, extraPointers, opNum, x, xShapeInfo, z, zShapeInfo, scalars, extraParams, dimension, dimensionLength);
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::execScalarDouble(Nd4jPointer *extraPointers,int opNum,
double *x,
Nd4jLong *xShapeInfo,
double *z,
Nd4jLong *zShapeInfo,
double *scalars,
double *extraParams,
int *dimension,
int dimensionLength) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = dim3(256, 256, 1024);
functions::scalar::ScalarTransform<double>::executeCudaAlongDimension(launchDims, extraPointers, opNum, x, xShapeInfo, z, zShapeInfo, scalars, extraParams, dimension, dimensionLength);
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::execScalarHalf(Nd4jPointer *extraPointers,int opNum,
float16 *x,
Nd4jLong *xShapeInfo,
float16 *z,
Nd4jLong *zShapeInfo,
float16 *scalars,
float16 *extraParams,
int *dimension,
int dimensionLength) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = dim3(256, 256, 1024);
functions::scalar::ScalarTransform<float16>::executeCudaAlongDimension(launchDims, extraPointers, opNum, x, xShapeInfo, z, zShapeInfo, scalars, extraParams, dimension, dimensionLength);
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::execAggregateFloat(Nd4jPointer *extraPointers,int opNum,
float **arguments,
int numArguments,
Nd4jLong **shapes,
int numShapes,
int *indexArguments,
int numIndexArguments,
int **intArrays,
int numIntArrays,
float *realArguments,
int numRealArguments) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int numBlocks = getDeviceId(extraPointers[2]);
int numThreads = getDeviceId(extraPointers[3]);
int shmem = getDeviceId(extraPointers[4]);
dim3 launchDims = dim3(numBlocks, numThreads, shmem);
// this macro builds a bunch of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(aggregateSimple, float, PARAMS(arguments, numArguments, shapes, numShapes, indexArguments, numIndexArguments, intArrays, numIntArrays, realArguments, numRealArguments), OPS_A(AGGREGATE_OPS))
nd4j::DebugHelper::checkErrorCode(stream, "execAggregateFloat(...) failed");
}
void NativeOps::execAggregateDouble(Nd4jPointer *extraPointers,int opNum,
double **arguments,
int numArguments,
Nd4jLong **shapes,
int numShapes,
int *indexArguments,
int numIndexArguments,
int **intArrays,
int numIntArrays,
double *realArguments,
int numRealArguments) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int numBlocks = getDeviceId(extraPointers[2]);
int numThreads = getDeviceId(extraPointers[3]);
int shmem = getDeviceId(extraPointers[4]);
dim3 launchDims = dim3(numBlocks, numThreads, shmem);
// this macro builds a bunch of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(aggregateSimple, double, PARAMS(arguments, numArguments, shapes, numShapes, indexArguments, numIndexArguments, intArrays, numIntArrays, realArguments, numRealArguments), OPS_A(AGGREGATE_OPS))
nd4j::DebugHelper::checkErrorCode(stream, "execAggregateDouble(...) failed");
}
void NativeOps::execAggregateHalf(Nd4jPointer *extraPointers,int opNum,
float16 **arguments,
int numArguments,
Nd4jLong **shapes,
int numShapes,
int *indexArguments,
int numIndexArguments,
int **intArrays,
int numIntArrays,
float16 *realArguments,
int numRealArguments) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int numBlocks = getDeviceId(extraPointers[2]);
int numThreads = getDeviceId(extraPointers[3]);
int shmem = getDeviceId(extraPointers[4]);
dim3 launchDims = dim3(numBlocks, numThreads, shmem);
// this macro builds a bunch of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(aggregateSimple, float16, PARAMS(arguments, numArguments, shapes, numShapes, indexArguments, numIndexArguments, intArrays, numIntArrays, realArguments, numRealArguments), OPS_A(AGGREGATE_OPS))
nd4j::DebugHelper::checkErrorCode(stream, "execAggregateHalf(...) failed");
}
void NativeOps::execAggregateBatchFloat(Nd4jPointer *extraPointers, int numAggregates, int opNum, int maxArgs, int maxShapes, int maxIntArrays, int maxIntArraySize, int maxIdx, int maxReals, void *ptrToArguments) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int numBlocks = getDeviceId(extraPointers[2]);
int numThreads = getDeviceId(extraPointers[3]);
int shmem = getDeviceId(extraPointers[4]);
dim3 launchDims = dim3(numAggregates, numThreads, shmem);
// this macro builds a bunch of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(aggregateBatchSimple, float, PARAMS(numAggregates, opNum, maxArgs, maxShapes, maxIntArrays, maxIntArraySize, maxIdx, maxReals, ptrToArguments), OPS_A(AGGREGATE_OPS))
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::execAggregateBatchDouble(Nd4jPointer *extraPointers, int numAggregates, int opNum, int maxArgs, int maxShapes, int maxIntArrays, int maxIntArraySize, int maxIdx, int maxReals, void *ptrToArguments) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int numBlocks = getDeviceId(extraPointers[2]);
int numThreads = getDeviceId(extraPointers[3]);
int shmem = getDeviceId(extraPointers[4]);
dim3 launchDims = dim3(numAggregates, numThreads, shmem);
// this macro builds a bunch of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(aggregateBatchSimple, double, PARAMS(numAggregates, opNum, maxArgs, maxShapes, maxIntArrays, maxIntArraySize, maxIdx, maxReals, ptrToArguments), OPS_A(AGGREGATE_OPS))
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::execAggregateBatchHalf(Nd4jPointer *extraPointers, int numAggregates, int opNum, int maxArgs, int maxShapes, int maxIntArrays, int maxIntArraySize, int maxIdx, int maxReals, void *ptrToArguments) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int numBlocks = getDeviceId(extraPointers[2]);
int numThreads = getDeviceId(extraPointers[3]);
int shmem = getDeviceId(extraPointers[4]);
dim3 launchDims = dim3(numAggregates, numThreads, shmem);
// this macro builds a bunch of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(aggregateBatchSimple, float16, PARAMS(numAggregates, opNum, maxArgs, maxShapes, maxIntArrays, maxIntArraySize, maxIdx, maxReals, ptrToArguments), OPS_A(AGGREGATE_OPS))
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::execRandomFloat(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, float *z, Nd4jLong *zShapeBuffer, float *extraArguments) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(float)) );
functions::random::RandomFunction<float>::executeCudaSingle(launchDims, extraPointers, opNum, stateHost, z, zShapeBuffer, extraArguments);
}
void NativeOps::execRandomFloat(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, float *x, Nd4jLong *xShapeBuffer, float *y, Nd4jLong *yShapeBuffer, float *z, Nd4jLong *zShapeBuffer, float *extraArguments) {
dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(float)) );
functions::random::RandomFunction<float>::executeCudaTriple(launchDims, extraPointers, opNum, stateHost, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, extraArguments);
}
void NativeOps::execRandomFloat(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, float *x, Nd4jLong *xShapeBuffer, float *z, Nd4jLong *zShapeBuffer, float *extraArguments) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(float)) );
functions::random::RandomFunction<float>::executeCudaDouble(launchDims, extraPointers, opNum, stateHost, x, xShapeBuffer, z, zShapeBuffer, extraArguments);
}
void NativeOps::execRandomDouble(Nd4jPointer *extraPointers, int opNum, Nd4jPointer state, double *z, Nd4jLong *zShapeBuffer, double *extraArguments) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(double)));
functions::random::RandomFunction<double>::executeCudaSingle(launchDims, extraPointers, opNum, state, z, zShapeBuffer, extraArguments);
}
void NativeOps::execRandomDouble(Nd4jPointer *extraPointers, int opNum, Nd4jPointer state, double *x, Nd4jLong *xShapeBuffer, double *y, Nd4jLong *yShapeBuffer, double *z, Nd4jLong *zShapeBuffer, double *extraArguments) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(double)));
functions::random::RandomFunction<double>::executeCudaTriple(launchDims, extraPointers, opNum, state, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, extraArguments);
}
void NativeOps::execRandomDouble(Nd4jPointer *extraPointers, int opNum, Nd4jPointer state, double *x, Nd4jLong *xShapeBuffer, double *z, Nd4jLong *zShapeBuffer, double *extraArguments) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(double)));
functions::random::RandomFunction<double>::executeCudaDouble(launchDims, extraPointers, opNum, state, x, xShapeBuffer, z, zShapeBuffer, extraArguments);
}
void NativeOps::execRandomHalf(Nd4jPointer *extraPointers, int opNum, Nd4jPointer state, float16 *z, Nd4jLong *zShapeBuffer, float16 *extraArguments) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(float16)));
functions::random::RandomFunction<float16>::executeCudaSingle(launchDims, extraPointers, opNum, state, z, zShapeBuffer, extraArguments);
}
void NativeOps::execRandomHalf(Nd4jPointer *extraPointers, int opNum, Nd4jPointer state, float16 *x, Nd4jLong *xShapeBuffer, float16 *y, Nd4jLong *yShapeBuffer, float16 *z, Nd4jLong *zShapeBuffer, float16 *extraArguments) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(float16)));
functions::random::RandomFunction<float16>::executeCudaTriple(launchDims, extraPointers, opNum, state, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, extraArguments);
}
void NativeOps::execRandomHalf(Nd4jPointer *extraPointers, int opNum, Nd4jPointer state, float16 *x, Nd4jLong *xShapeBuffer, float16 *z, Nd4jLong *zShapeBuffer, float16 *extraArguments) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(float16)));
functions::random::RandomFunction<float16>::executeCudaDouble(launchDims, extraPointers, opNum, state, x, xShapeBuffer, z, zShapeBuffer, extraArguments);
}
Nd4jPointer NativeOps::initRandom(Nd4jPointer *extraPointers, long seed, long bufferSize, Nd4jPointer ptrToBuffer) {
unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
// we don't synchronize at random initialization; it's safe to stay unsynchronized here
// cudaStreamSynchronize(*stream);
auto ptrDev = reinterpret_cast<unsigned long long *>(ptrToBuffer);
auto buffer = new nd4j::random::RandomBuffer(seed, bufferSize, reinterpret_cast<uint64_t *>(ptrHost), reinterpret_cast<uint64_t *>(ptrDev));
buffer->propagateToDevice(buffer, *stream);
nd4j::DebugHelper::checkErrorCode(stream, "initRandom(...) failed A");
// we generate the sequence in host memory
nd4j::random::Xoroshiro128 generator(buffer);
generator.refreshBuffer();
// and copy it to gpu
cudaMemcpyAsync(ptrDev, ptrHost, bufferSize * 8, cudaMemcpyHostToDevice, *stream);
nd4j::DebugHelper::checkErrorCode(stream, "initRandom(...) failed B");
return buffer;
}
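// Illustrative host-side lifecycle for the RNG state above; a sketch only, with
// placeholder seeds and sizes (the device buffer must hold bufferSize * 8 bytes,
// matching the memcpy above):
//
//   Nd4jLong bufferSize = 100000;
//   Nd4jPointer devBuf = /* device allocation of bufferSize * 8 bytes */;
//   Nd4jPointer rng = nativeOps.initRandom(extras, 119L, bufferSize, devBuf);
//   nativeOps.execRandomFloat(extras, opNum, rng, z, zShapeBuffer, extraArgs);
//   nativeOps.refreshBuffer(extras, 120L, rng);  // new seed + regenerated sequence
//   nativeOps.destroyRandom(rng);                // synchronizes the device first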
void NativeOps::destroyRandom(Nd4jPointer ptrBuffer) {
nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrBuffer);
// FIXME: it's a bad thing, but we can't know in advance which stream(s) were using this generator in practice
cudaDeviceSynchronize();
delete buffer;
}
void NativeOps::refreshBuffer(Nd4jPointer *extraPointers, long seed, Nd4jPointer ptrRandom) {
nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrRandom);
unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
cudaStreamSynchronize(*stream);
uint64_t *ptrDev = buffer->getDeviceBuffer();
// update rng state
buffer->setSeed(seed);
buffer->setOffset(0);
buffer->propagateToDevice(buffer, *stream);
// refresh the buffer on the host side
nd4j::random::Xoroshiro128 generator(buffer);
generator.refreshBuffer();
// copy back to gpu
cudaMemcpyAsync(ptrDev, ptrHost, buffer->getSize() * 8, cudaMemcpyHostToDevice, *stream);
}
void NativeOps::reSeedBuffer(Nd4jPointer *extraPointers, long seed, Nd4jPointer ptrRandom) {
nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrRandom);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
cudaStreamSynchronize(*stream);
// update rng state
buffer->reSeed(seed);
buffer->setOffset(0);
buffer->propagateToDevice(buffer, *stream);
}
/**
*
* @param npyArray
* @return
*/
Nd4jPointer NativeOps::shapeBufferForNumpy(Nd4jPointer npyArray) {
/*
cnpy::NpyArray *arrPointer = reinterpret_cast<cnpy::NpyArray *>(npyArray);
int *shapeBuffer = shape::shapeBufferOfNpy(*arrPointer);
return reinterpret_cast<Nd4jPointer>(shapeBuffer);
*/
cnpy::NpyArray arr = cnpy::loadNpyFromPointer(reinterpret_cast<char *>(npyArray));
unsigned int *shape = new unsigned int[arr.shape.size()];
for(int i = 0; i < arr.shape.size(); i++) {
shape[i] = arr.shape[i];
}
auto shapeBuffer = shape::shapeBufferOfNpy(arr.shape.size(),
shape,
arr.fortranOrder);
delete[] shape;
return reinterpret_cast<Nd4jPointer>(shapeBuffer);
}
/**
*
* @param npyArray
* @return
*/
Nd4jPointer NativeOps::dataPointForNumpy(Nd4jPointer npyArray) {
char *buff = reinterpret_cast<char *>(npyArray);
//printf("Pointer contents %s\n",buff);
cnpy::NpyArray arr = cnpy::loadNpyFromPointer(reinterpret_cast<char *>(npyArray));
cnpy::NpyArray *arrPointer = &arr;
char *data = arrPointer->data;
if(arrPointer->wordSize == sizeof(float)) {
float *floatData = reinterpret_cast<float *>(data);
return reinterpret_cast<Nd4jPointer>(floatData);
}
else if(arrPointer->wordSize == sizeof(double)) {
double *doubleData = reinterpret_cast<double *>(data);
return reinterpret_cast<Nd4jPointer >(doubleData);
}
return reinterpret_cast<Nd4jPointer >(0);
}
/**
* Load a numpy array from a file
* and return it as an Nd4jPointer
* @param path
* @return
*/
Nd4jPointer NativeOps::numpyFromFile(std::string path) {
/*cnpy::NpyArray arr = cnpy::npyLoad(path);
return reinterpret_cast<Nd4jPointer >(&arr);
*/
char *numpyBuffer = cnpy::loadFile(path.data());
return reinterpret_cast<Nd4jPointer >(numpyBuffer);
}
void NativeOps::releaseNumpy(Nd4jPointer npyArray) {
free(reinterpret_cast<void *>(npyArray));
}
/**
* Return the length of a shape buffer
* based on the pointer
* @param buffer the buffer pointer to check
* @return
*/
int NativeOps::lengthForShapeBufferPointer(Nd4jPointer buffer) {
auto shapeBuffer = reinterpret_cast<Nd4jLong *>(buffer);
return shape::shapeInfoLength(shape::rank(shapeBuffer));
}
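// Worked example, assuming the usual nd4j shapeInfo layout of
// {rank, shape..., strides..., offset, ews, order}, i.e.
// shapeInfoLength(rank) == rank * 2 + 4: a rank-2 buffer such as
// {2, 3, 4, 4, 1, 0, 1, 99} (3x4, c-order) gives a length of 2 * 2 + 4 == 8.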
/**
* Get the element size for a numpy array
* @param npyArray the numpy array's address
* to get the length for
* @return
*/
int NativeOps::elementSizeForNpyArray(Nd4jPointer npyArray) {
cnpy::NpyArray arr = cnpy::loadNpyFromPointer(reinterpret_cast<char *>(npyArray));
cnpy::NpyArray *arrPointer = &arr;
int size = arrPointer->wordSize;
return size;
/*
cnpy::NpyArray *arr = reinterpret_cast<cnpy::NpyArray *>(npyArray);
return arr->wordSize;
*/
}
/**
* The pointer to get the address for
*
* @param address the address to get the pointer
* @return the pointer for the given address
*/
Nd4jPointer NativeOps::pointerForAddress(Nd4jLong address) {
return reinterpret_cast<Nd4jPointer >(address);
}
void NativeOps::tearDouble(Nd4jPointer *extras, double *x, Nd4jLong *xShapeInfo, Nd4jPointer *targets, Nd4jLong *zShapeInfo, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
tearKernelDouble<<<512, 512, 512, *stream>>>(x, xShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets);
nd4j::DebugHelper::checkErrorCode(stream, "tearDouble(...) failed");
}
void NativeOps::tearFloat(Nd4jPointer *extras, float *x, Nd4jLong *xShapeInfo, Nd4jPointer *targets, Nd4jLong *zShapeInfo, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
tearKernelFloat<<<512, 512, 512, *stream>>>(x, xShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets);
nd4j::DebugHelper::checkErrorCode(stream, "tearFloat(...) failed");
}
void NativeOps::tearHalf(Nd4jPointer *extras, float16 *x, Nd4jLong *xShapeInfo, Nd4jPointer *targets, Nd4jLong *zShapeInfo, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
tearKernelHalf<<<512, 512, 512, *stream>>>(x, xShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets);
nd4j::DebugHelper::checkErrorCode(stream, "tearHalf(...) failed");
}
void prescanArrayRecursive(Nd4jPointer *extras, int *z, int *x, int numElements, int level) {
auto stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
auto g_scanBlockSums = reinterpret_cast<int **>(&extras[2]);
int blockSize = 512; // max size of the thread blocks
int numBlocks = nd4j::math::nd4j_max<int>(1, static_cast<int>(ceil(static_cast<float>(numElements) / (2.f * blockSize))));
int numThreads;
if (numBlocks > 1)
numThreads = blockSize;
else if (nd4j::isPowerOfTwo(numElements))
numThreads = numElements / 2;
else
numThreads = nd4j::floorPow2(numElements);
int numEltsPerBlock = numThreads * 2;
// if this is a non-power-of-2 array, the last block will be non-full;
// compute the smallest power of 2 large enough to scan it.
int numEltsLastBlock =
numElements - (numBlocks-1) * numEltsPerBlock;
int numThreadsLastBlock = nd4j::math::nd4j_max<int>(1, numEltsLastBlock / 2);
int np2LastBlock = 0;
int sharedMemLastBlock = 0;
if (numEltsLastBlock != numEltsPerBlock) {
np2LastBlock = 1;
if(!isPowerOfTwo(numEltsLastBlock))
numThreadsLastBlock = floorPow2(numEltsLastBlock);
unsigned int extraSpace = (2 * numThreadsLastBlock) / NUM_BANKS;
sharedMemLastBlock = sizeof(int) * (2 * numThreadsLastBlock + extraSpace);
}
// padding space is used to avoid shared memory bank conflicts
int extraSpace = numEltsPerBlock / NUM_BANKS;
int sharedMemSize = sizeof(int) * (numEltsPerBlock + extraSpace);
// setup execution parameters
// if NP2, we process the last block separately
dim3 grid(max(1, numBlocks - np2LastBlock), 1, 1);
dim3 threads(numThreads, 1, 1);
dim3 gridOnes(1, 1, 1);
dim3 threadsOnes(numThreadsLastBlock, 1, 1);
if (sharedMemSize < 2048)
sharedMemSize = 2048;
if (sharedMemLastBlock < 2048)
sharedMemLastBlock = 2048;
// execute the scan
if (numBlocks > 1) {
nd4j::prescanLauncher<true, false>(grid, threads, sharedMemSize, stream, z, x, g_scanBlockSums[level], numThreads * 2, 0, 0);
if (np2LastBlock) {
nd4j::prescanLauncher<true, true>(gridOnes, threadsOnes, sharedMemLastBlock, stream, z, x, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock);
}
// After scanning all the sub-blocks, we are mostly done. But now we
// need to take all of the last values of the sub-blocks and scan those.
// This will give us a new value that must be added to each block to
// get the final results.
// recursive (CPU) call
prescanArrayRecursive(extras, g_scanBlockSums[level], g_scanBlockSums[level], numBlocks, level+1);
nd4j::uniformAdd<<<grid, threads, 1024, *stream>>>(z, g_scanBlockSums[level], numElements - numEltsLastBlock, 0, 0);
if (np2LastBlock) {
nd4j::uniformAdd<<<1, numThreadsLastBlock, 1024, *stream>>>(z, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock);
}
} else if (isPowerOfTwo(numElements)) {
nd4j::prescanLauncher<false, false>(grid, threads, sharedMemSize, stream, z, x, 0, numThreads * 2, 0, 0);
} else {
nd4j::prescanLauncher<false, true>(grid, threads, sharedMemSize, stream, z, x, 0, numElements, 0, 0);
}
}
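// Worked sizing example for the recursion above (numbers only, no new behavior):
// with numElements = 2000 and blockSize = 512, each full block scans
// numEltsPerBlock = 1024 elements, so numBlocks = 2 and numThreads = 512; the
// last block holds numEltsLastBlock = 2000 - 1024 = 976 elements, which is not
// a power of two, so numThreadsLastBlock = floorPow2(976) = 512 and the np2
// path handles the tail. The two per-block sums are then scanned recursively at
// level + 1 and distributed back by uniformAdd.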
void NativeOps::encodeThresholdP1Float(Nd4jPointer *extras, float *dx, Nd4jLong N, int *dz, float threshold) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
int blockSize = 1024;
int numBlocks = N / blockSize + (N % blockSize ? 1 : 0);
nd4j::encoderKernelP1Float<<<numBlocks, blockSize , 1024, *stream>>>(dx, N, dz, threshold);
nd4j::DebugHelper::checkErrorCode(stream, "encodeThresholdP1Float(...) failed");
}
void NativeOps::encodeThresholdP1Double(Nd4jPointer *extras, double *dx, Nd4jLong N, int *dz, float threshold) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
int blockSize = 1024;
int numBlocks = N / blockSize + (N % blockSize ? 1 : 0);
nd4j::encoderKernelP1Double<<<numBlocks, blockSize , 1024, *stream>>>(dx, N, dz, threshold);
nd4j::DebugHelper::checkErrorCode(stream, "encodeThresholdP1Double(...) failed");
}
void NativeOps::encodeThresholdP1Half(Nd4jPointer *extras, float16 *dx, Nd4jLong N, int *dz, float threshold) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
int blockSize = 1024;
int numBlocks = N / blockSize + (N % blockSize ? 1 : 0);
encoderKernelP1Half<<<numBlocks, blockSize , 1024, *stream>>>(dx, N, dz, threshold);
nd4j::DebugHelper::checkErrorCode(stream, "encodeThresholdP1Half(...) failed");
}
void NativeOps::encodeThresholdP2Int(Nd4jPointer *extraPointers, int *dx, Nd4jLong N, int *dz) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
//encoderKernelP2Float<<<numBlocks, blockSize , 1024 * sizeof(float), *stream>>>(dx, N, dz);
// prefix-scan the per-block counts produced by P1 into scatter offsets
prescanArrayRecursive(extraPointers, dz, dx + 1, (int) N, 0);
nd4j::DebugHelper::checkErrorCode(stream, "encodeThresholdP2Int(...) failed");
}
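// The threshold encoder is a three-phase pipeline (inferred from the P1/P2/P3
// naming and the call sites in this file; the exact element semantics are an
// assumption): P1 flags elements whose magnitude exceeds the threshold and
// counts them per block, P2 (the function above) prefix-scans those counts into
// scatter offsets via prescanArrayRecursive, and P3 writes the surviving
// indices into dz at the offsets computed in P2.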
void NativeOps::encodeThresholdP3Float(Nd4jPointer *extraPointers, float *dx, int *offsets, Nd4jLong N, int *dz){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int blockSize = 1024;
int numBlocks = N / blockSize + (N % blockSize ? 1 : 0);
nd4j::encoderKernelP3Float<<<numBlocks, blockSize , 4096, *stream>>>(dx, offsets, N, dz);
nd4j::DebugHelper::checkErrorCode(stream, "encodeThresholdP3Float(...) failed");
}
void NativeOps::encodeThresholdP3Double(Nd4jPointer *extraPointers, double *dx, int *offsets, Nd4jLong N, int *dz){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int blockSize = 1024;
int numBlocks = N / blockSize + (N % blockSize ? 1 : 0);
nd4j::encoderKernelP3Double<<<numBlocks, blockSize , 4096, *stream>>>(dx, offsets, N, dz);
nd4j::DebugHelper::checkErrorCode(stream, "encodeThresholdP3Double(...) failed");
}
void NativeOps::encodeThresholdP3Half(Nd4jPointer *extraPointers, float16 *dx, int *offsets, Nd4jLong N, int *dz){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int blockSize = 1024;
int numBlocks = N / blockSize + (N % blockSize ? 1 : 0);
nd4j::encoderKernelP3Half<<<numBlocks, blockSize , 4096, *stream>>>(dx, offsets, N, dz);
nd4j::DebugHelper::checkErrorCode(stream, "encodeThresholdP3Half(...) failed");
}
void NativeOps::decodeThresholdFloat(Nd4jPointer *extraPointers, void *dx, Nd4jLong N, float *dz){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
// we probably want smaller blocks here; memory writes are misaligned anyway
int blockSize = 128;
int numBlocks = N / blockSize + (N % blockSize ? 1 : 0);
nd4j::decoderKernelFloat<<<numBlocks, blockSize , 1024, *stream>>>(dx, N, dz);
nd4j::DebugHelper::checkErrorCode(stream, "decodeThresholdFloat(...) failed");
}
void NativeOps::decodeThresholdDouble(Nd4jPointer *extraPointers, void *dx, Nd4jLong N, double *dz){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
// we probably want smaller blocks here; memory writes are misaligned anyway
int blockSize = 128;
int numBlocks = N / blockSize + (N % blockSize ? 1 : 0);
nd4j::decoderKernelDouble<<<numBlocks, blockSize , 1024, *stream>>>(dx, N, dz);
nd4j::DebugHelper::checkErrorCode(stream, "decodeThresholdDouble(...) failed");
}
void NativeOps::decodeThresholdHalf(Nd4jPointer *extraPointers, void *dx, Nd4jLong N, float16 *dz){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
// we probably want smaller blocks here; memory writes are misaligned anyway
int blockSize = 128;
int numBlocks = N / blockSize + (N % blockSize ? 1 : 0);
nd4j::decoderKernelHalf<<<numBlocks, blockSize , 1024, *stream>>>(dx, N, dz);
nd4j::DebugHelper::checkErrorCode(stream, "decodeThresholdHalf(...) failed");
}
void NativeOps::execReduce3AllDouble(Nd4jPointer *extraPointers,
int opNum,
double *x,
Nd4jLong *xInfo,
double *extraParamsVals,
double *y,
Nd4jLong *yInfo,
double *result,
Nd4jLong *resultShapeInfoBuffer,
int *dimension,
int dimensionLength,
Nd4jLong *xTadShapeInfo,
Nd4jLong *xOffsets,
Nd4jLong *yTadShapeInfo,
Nd4jLong *yOffsets) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D119 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[7], dimensionLength, sizeof(double), 2);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AD119 opNum:[%i]\n", opNum);
reduce3AllDouble<<<launchDims.x, 512, (512 * 8 * 2 + 512), *stream>>>(
opNum,
x,
xInfo,
y,
yInfo,
extraParamsVals,
result,
resultShapeInfoBuffer,
dimension,
dimensionLength,
1, allocationPointer, xTadShapeInfo, xOffsets, yTadShapeInfo, yOffsets);
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::execReduce3AllFloat(Nd4jPointer *extraPointers,
int opNum,
float *x,
Nd4jLong *xInfo,
float *extraParamsVals,
float *y,
Nd4jLong *yInfo,
float *result,
Nd4jLong *resultShapeInfoBuffer,
int *dimension,
int dimensionLength,
Nd4jLong *xTadShapeInfo,
Nd4jLong *xOffsets,
Nd4jLong *yTadShapeInfo,
Nd4jLong *yOffsets) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F119 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[7], dimensionLength, sizeof(float), 2);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF119 opNum:[%i]\n", opNum);
reduce3AllFloat<<<launchDims.x, 512, (512 * 4 * 2 + 512), *stream>>>(
opNum,
x,
xInfo,
y,
yInfo,
extraParamsVals,
result,
resultShapeInfoBuffer,
dimension,
dimensionLength,
1, allocationPointer, xTadShapeInfo, xOffsets, yTadShapeInfo, yOffsets);
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::execReduce3AllHalf(Nd4jPointer *extraPointers,
int opNum,
float16 *x,
Nd4jLong *xInfo,
float16 *extraParamsVals,
float16 *y,
Nd4jLong *yInfo,
float16 *result,
Nd4jLong *resultShapeInfoBuffer,
int *dimension,
int dimensionLength,
Nd4jLong *xTadShapeInfo,
Nd4jLong *xOffsets,
Nd4jLong *yTadShapeInfo,
Nd4jLong *yOffsets) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]);
auto hostTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H119 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[7], dimensionLength, sizeof(float16), 2);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AH119 opNum:[%i]\n", opNum);
reduce3AllHalf<<<launchDims.x, 512, (512 * 2 * 2 + 512), *stream>>>(
opNum,
x,
xInfo,
y,
yInfo,
extraParamsVals,
result,
resultShapeInfoBuffer,
dimension,
dimensionLength,
1, allocationPointer, xTadShapeInfo, xOffsets, yTadShapeInfo, yOffsets);
DEBUG_KERNEL(stream, opNum);
}
void NativeOps::sortFloat(Nd4jPointer *extraPointers, float *x, Nd4jLong *xShapeInfo, bool descending) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto xLength = shape::length(hostXShapeInfo);
auto xEWS = shape::elementWiseStride(hostXShapeInfo);
// if xLength is a power of 2, use a full bitonic sort
if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) {
int numThreads = nd4j::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
for (int k = 2; k <= xLength; k = 2*k) {
for (int j = k >> 1; j > 0; j = j >> 1) {
cudaBitonicSortFloat<<<numBlocks, numThreads, 512, *stream>>>(x, xShapeInfo, j, k, xLength, descending);
}
}
} else {
#ifdef __clang__
if (1 > 0) {
#elif __GNUC__
if ((xLength > 1024 * 1024 * 10) && xEWS == 1) {
b40c::radix_sort::Enactor enactor;
b40c::util::DoubleBuffer<float> sort_storage(x);
enactor.Sort(sort_storage, xLength);
// fire reverse op
if (descending)
execTransformFloat(extraPointers, 70, x, xShapeInfo, x, xShapeInfo, nullptr);
} else {
#else
if (1 > 0) {
#endif
int numThreads = nd4j::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
numBlocks = nd4j::math::nd4j_min<int>(512, numBlocks);
int max = 2, dg = 0;
while (max < xLength) {
max <<= 1;
dg++;
}
max <<= 1;
for (int window = 2; window < max; window<<=1) {
int n = window;
int rev = 0;
do{
int half = n >> 1;
cudaSortFloat<<<numBlocks, numThreads, numThreads * 2 * sizeof(float), *stream>>>(x, xShapeInfo, n, xLength, rev, descending);
n>>=1;
rev = 1;
} while(n > 1);
}
}
}
nd4j::DebugHelper::checkErrorCode(stream, "sortFloat(...) failed");
}
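// Worked example of the bitonic stage loops above for xLength = 8: k walks
// 2, 4, 8 (sub-sequence size) and, for each k, j walks k/2 down to 1 (compare
// distance), yielding the stage pairs (2,1), (4,2), (4,1), (8,4), (8,2), (8,1);
// that is log2(n) * (log2(n) + 1) / 2 = 6 kernel launches in total, each one a
// full compare-exchange pass over the array.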
void NativeOps::sortDouble(Nd4jPointer *extraPointers, double *x, Nd4jLong *xShapeInfo, bool descending) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
auto xLength = shape::length(hostXShapeInfo);
auto xEWS = shape::elementWiseStride(hostXShapeInfo);
// if xLength is a power of 2, use a full bitonic sort
if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) {
int numThreads = nd4j::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
for (int k = 2; k <= xLength; k = 2*k) {
for (int j = k >> 1; j > 0; j = j >> 1) {
cudaBitonicSortDouble<<<numBlocks, numThreads, 512, *stream>>>(x, xShapeInfo, j, k, xLength, descending);
}
}
} else {
#ifdef __clang__
if (1 > 0) {
#elif __GNUC__
if ((xLength > 1024 * 1024 * 10) && xEWS == 1) {
b40c::radix_sort::Enactor enactor;
b40c::util::DoubleBuffer<double> sort_storage(x);
enactor.Sort(sort_storage, xLength);
// fire reverse op
if (descending)
execTransformDouble(extraPointers, 70, x, xShapeInfo, x, xShapeInfo, nullptr);
} else {
#else
if ( 1 > 0) {
#endif
int numThreads = nd4j::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
numBlocks = nd4j::math::nd4j_min<int>(512, numBlocks);
int max = 2, dg = 0;
while (max < xLength) {
max <<= 1;
dg++;
}
max <<= 1;
for (int window = 2; window < max; window<<=1) {
int n = window;
int rev = 0;
do{
int half = n >> 1;
cudaSortDouble<<<numBlocks, numThreads, numThreads * 2 * sizeof(double), *stream>>>(x, xShapeInfo, n, xLength, rev, descending);
n>>=1;
rev = 1;
} while(n > 1);
}
}
}
nd4j::DebugHelper::checkErrorCode(stream, "sortDouble(...) failed");
}
void NativeOps::sortHalf(Nd4jPointer *extraPointers, float16 *x, Nd4jLong *xShapeInfo, bool descending) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
int xLength = shape::length(hostXShapeInfo);
// if xLength is a power of 2, use a full bitonic sort
if ((xLength != 0) && ((xLength & (xLength - 1)) == 0)) {
int numThreads = nd4j::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
for (int k = 2; k <= xLength; k = 2*k) {
for (int j = k >> 1; j > 0; j = j >> 1) {
cudaBitonicSortHalf<<<numBlocks, numThreads, 512, *stream>>>(x, xShapeInfo, j, k, xLength, descending);
}
}
} else {
// half is incompatible with radix, so only bitonic here
int numThreads = nd4j::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
numBlocks = nd4j::math::nd4j_min<int>(512, numBlocks);
int max = 2, dg = 0;
while (max < xLength) {
max <<= 1;
dg++;
}
max <<= 1;
for (int window = 2; window < max; window<<=1) {
int n = window;
int rev = 0;
do{
int half = n >> 1;
cudaSortHalf<<<numBlocks, numThreads, numThreads * 2 * sizeof(float16), *stream>>>(x, xShapeInfo, n, xLength, rev, descending);
n>>=1;
rev = 1;
} while(n > 1);
}
}
nd4j::DebugHelper::checkErrorCode(stream, "sortHalf(...) failed");
}
void NativeOps::sortTadFloat(Nd4jPointer *extraPointers, float *x, Nd4jLong *xShapeInfo, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, bool descending) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
cudaSortTadFloat<<<512, 512, 1088 * sizeof(float), *stream>>>(x, xShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, descending);
nd4j::DebugHelper::checkErrorCode(stream, "sortTadFloat(...) failed");
}
void NativeOps::sortTadHalf(Nd4jPointer *extraPointers, float16 *x, Nd4jLong *xShapeInfo, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, bool descending) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
cudaSortTadHalf<<<512, 512, 1088 * sizeof(float16), *stream>>>(x, xShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, descending);
nd4j::DebugHelper::checkErrorCode(stream, "sortTadHalf(...) failed");
}
void NativeOps::sortTadDouble(Nd4jPointer *extraPointers, double *x, Nd4jLong *xShapeInfo, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, bool descending) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
cudaSortTadDouble<<<512, 512, 1088 * sizeof(double), *stream>>>(x, xShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, descending);
nd4j::DebugHelper::checkErrorCode(stream, "sortTadDouble(...) failed");
}
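// "TAD" above stands for tensor-along-dimension, as used across nd4j (stated
// here as an informal gloss): each (tadShapeInfo, tadOffsets[i]) pair describes
// one sub-array sliced along `dimension`, and the kernels sort every such
// sub-array independently.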
void NativeOps::sortCooIndicesFloat(Nd4jPointer *extraPointers, Nd4jLong *indices, float *values, Nd4jLong length, int rank) {
throw std::runtime_error("Not implemented yet");
}
void NativeOps::sortCooIndicesDouble(Nd4jPointer *extraPointers, Nd4jLong *indices, double *values, Nd4jLong length, int rank) {
throw std::runtime_error("Not implemented yet");
}
void NativeOps::sortCooIndicesHalf(Nd4jPointer *extraPointers, Nd4jLong *indices, float16 *values, Nd4jLong length, int rank) {
throw std::runtime_error("Not implemented yet");
}
Nd4jLong NativeOps::encodeBitmapFloat(Nd4jPointer *extraPointers, float *dx, Nd4jLong N, int *dz, float threshold) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto *hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
int *resultPointer = reinterpret_cast<int *>(extraPointers[2]);
int *reductionPointer = reinterpret_cast<int *>(extraPointers[3]);
cudaEncodeBitmapFloat<<<512, 512, 512 * 2 * sizeof(float) + 384, *stream>>>(dx, N, dz, resultPointer, reductionPointer, threshold);
nd4j::DebugHelper::checkErrorCode(stream, "encodeBitmapFloat(...) failed");
Nd4jLong result = (Nd4jLong) resultPointer[0];
resultPointer[0] = 0;
return result;
}
Nd4jLong NativeOps::encodeBitmapDouble(Nd4jPointer *extraPointers, double *dx, Nd4jLong N, int *dz, float threshold) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
int *resultPointer = reinterpret_cast<int *>(extraPointers[2]);
int *reductionPointer = reinterpret_cast<int *>(extraPointers[3]);
cudaEncodeBitmapDouble<<<512, 512, 512 * 2 * sizeof(double) + 384, *stream>>>(dx, N, dz, resultPointer, reductionPointer, threshold);
nd4j::DebugHelper::checkErrorCode(stream, "encodeBitmapDouble(...) failed");
Nd4jLong result = (Nd4jLong) resultPointer[0];
resultPointer[0] = 0;
return result;
}
Nd4jLong NativeOps::encodeBitmapHalf(Nd4jPointer *extraPointers, float16 *dx, Nd4jLong N, int *dz, float threshold) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
int *resultPointer = reinterpret_cast<int *>(extraPointers[2]);
int *reductionPointer = reinterpret_cast<int *>(extraPointers[3]);
cudaEncodeBitmapHalf<<<512, 512, (512 * sizeof(float16)) + (512 * sizeof(int)) + 384, *stream>>>(dx, N, dz, resultPointer, reductionPointer, threshold);
nd4j::DebugHelper::checkErrorCode(stream, "execBitmapHalf(...) failed");
Nd4jLong result = (Nd4jLong) resultPointer[0];
resultPointer[0] = 0;
return result;
}
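// All three bitmap encoders above share the same readback convention: the
// kernel accumulates the number of encoded elements into resultPointer[0]
// (host-visible memory, e.g. pinned/mapped; an assumption based on the direct
// read here), the host reads it after checkErrorCode (assumed to synchronize
// the stream), and the counter is zeroed for the next call.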
void NativeOps::decodeBitmapFloat(Nd4jPointer *extraPointers, void *dx, Nd4jLong N, float *dz) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
cudaDecodeBitmapFloat<<<512, 512, 512 * sizeof(float) + 384, *stream>>>(dx, N, dz);
nd4j::DebugHelper::checkErrorCode(stream, "decodeBitmapFloat(...) failed");
}
void NativeOps::decodeBitmapDouble(Nd4jPointer *extraPointers, void *dx, Nd4jLong N, double *dz) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
cudaDecodeBitmapDouble<<<512, 512, 512 * sizeof(double) + 384, *stream>>>(dx, N, dz);
nd4j::DebugHelper::checkErrorCode(stream, "decodeBitmapDouble(...) failed");
}
void NativeOps::decodeBitmapHalf(Nd4jPointer *extraPointers, void *dx, Nd4jLong N, float16 *dz) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
cudaDecodeBitmapHalf<<<512, 512, 512 * sizeof(float16) + 384, *stream>>>(dx, N, dz);
nd4j::DebugHelper::checkErrorCode(stream, "decodeBitmapDouble(...) failed");
}
Nd4jLong* NativeOps::mmapFile(Nd4jPointer *extraPointers, const char *fileName, Nd4jLong length) {
return nullptr;
}
void NativeOps::munmapFile(Nd4jPointer *extraPointers, Nd4jLong* ptrMap, Nd4jLong length) {
}
Nd4jPointer NativeOps::executeProtoGraphFloat(Nd4jPointer *extraPointers, Nd4jPointer protoBufferPointer) {
return nullptr;
}
Nd4jPointer NativeOps::executeProtoGraphFloat(Nd4jPointer *extraPointers, const char *fileName) {
return nullptr;
}
nd4j::graph::ResultWrapper* NativeOps::executeFlatGraphFloat(Nd4jPointer *extraPointers, Nd4jPointer flatBufferPointer) {
return nullptr;
}
nd4j::graph::ResultWrapper* NativeOps::executeFlatGraphHalf(Nd4jPointer *extraPointers, Nd4jPointer flatBufferPointer) {
return nullptr;
}
nd4j::graph::ResultWrapper* NativeOps::executeFlatGraphDouble(Nd4jPointer *extraPointers, Nd4jPointer flatBufferPointer) {
return nullptr;
}
const char* NativeOps::getAllCustomOps() {
return nd4j::ops::OpRegistrator::getInstance()->getAllCustomOperations();
}
template<typename T>
nd4j::ShapeList* _calculateOutputShapes(Nd4jPointer* extraPointers, nd4j::ops::DeclarableOp<T>* op, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, T* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) {
nd4j::graph::VariableSpace<T> varSpace;
Context<T> block(2, &varSpace);
nd4j::ShapeList inShapes;
for (int e = 0; e < numIArgs; e++)
block.getIArguments()->push_back(iArgs[e]);
for (int e = 0; e < numTArgs; e++)
block.getTArguments()->push_back(tArgs[e]);
for (int e = 0; e < numInputShapes; e++) {
auto shape_ = reinterpret_cast<Nd4jLong *>(inputShapes[e]);
auto buffer_ = reinterpret_cast<T *>(inputBuffers[e]);
auto array = new nd4j::NDArray<T>(buffer_, shape_);
array->triggerAllocationFlag(false, false);
// the block should contain references to the proper variables
varSpace.putVariable(1, e, array);
block.pickInput(1, e);
inShapes.push_back(shape_);
}
auto shapeList = op->calculateOutputShape(&inShapes, block);
if (varSpace.workspace() != nullptr)
shapeList->detach();
return shapeList;
}
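// Minimal host-side sketch of driving the shape-inference helper above; opHash
// and the buffers are hypothetical placeholders, not values from a real graph:
//
//   Nd4jLong iArgs[] = {0};
//   auto shapes = nativeOps.calculateOutputShapesFloat(nullptr, opHash,
//           inputBuffers, inputShapes, 1, nullptr, 0, iArgs, 1);
//   // ... consume the computed shapes ...
//   nativeOps.deleteShapeList(reinterpret_cast<Nd4jPointer>(shapes));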
nd4j::ShapeList* NativeOps::calculateOutputShapesFloat(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, float* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) {
auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationFloat(hash);
return _calculateOutputShapes<float>(extraPointers, op, inputBuffers, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs);
}
nd4j::ShapeList* NativeOps::calculateOutputShapesHalf(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, float16* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) {
auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationHalf(hash);
return _calculateOutputShapes<float16>(extraPointers, op, inputBuffers, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs);
}
nd4j::ShapeList* NativeOps::calculateOutputShapesDouble(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) {
auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationDouble(hash);
return _calculateOutputShapes<double>(extraPointers, op, inputBuffers, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs);
}
template<typename T>
nd4j::ShapeList* _calculateOutputShapes(Nd4jPointer* extraPointers, nd4j::ops::DeclarableOp<T>* op, Nd4jPointer* inputShapes, int numInputShapes, T* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) {
nd4j::graph::Context<T> block(1);
nd4j::ShapeList inShapes;
for (int e = 0; e < numIArgs; e++)
block.getIArguments()->push_back(iArgs[e]);
for (int e = 0; e < numTArgs; e++)
block.getTArguments()->push_back(tArgs[e]);
for (int e = 0; e < numInputShapes; e++)
inShapes.push_back(static_cast<Nd4jLong *>(inputShapes[e]));
auto shapeList = op->calculateOutputShape(&inShapes, block);
return shapeList;
}
nd4j::ShapeList* NativeOps::calculateOutputShapesFloat(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputShapes, int numInputShapes, float* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) {
auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationFloat(hash);
return _calculateOutputShapes<float>(extraPointers, op, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs);
}
nd4j::ShapeList* NativeOps::calculateOutputShapesHalf(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputShapes, int numInputShapes, float16* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) {
auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationHalf(hash);
return _calculateOutputShapes<float16>(extraPointers, op, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs);
}
nd4j::ShapeList* NativeOps::calculateOutputShapesDouble(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) {
auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationDouble(hash);
return _calculateOutputShapes<double>(extraPointers, op, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs);
}
template<typename T>
static FORCEINLINE Nd4jStatus realExec(nd4j::ops::DeclarableOp<T>* op, Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, T* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool isInplace) {
if (op == nullptr) {
nd4j_printf("Can't find requested operation: [%lld]\n", hash);
return Status::THROW();
}
// we're using the same fake nodeId everywhere here
std::vector<nd4j::NDArray<T>*> inputs(numInputs);
std::vector<nd4j::NDArray<T>*> outputs(numOutputs);
std::vector<T> ttArgs(numTArgs);
std::vector<Nd4jLong> iiArgs(numIArgs);
// filling block now with inputs
for (int e = 0; e < numInputs; e++) {
auto buffer = reinterpret_cast<T *>(inputBuffers[e]);
auto shape = reinterpret_cast<Nd4jLong *>(inputShapes[e]);
inputs[e] = new nd4j::NDArray<T>(buffer, shape);
}
// if not inplace - transferring output arrays
if (!isInplace)
for (int e = 0; e < numOutputs; e++) {
auto buffer = reinterpret_cast<T *>(outputBuffers[e]);
// we want to keep original output shape intact
auto shape = shape::copyShape(reinterpret_cast<Nd4jLong *>(outputShapes[e]));
auto array = new nd4j::NDArray<T>(buffer, shape);
outputs[e] = array;
// and we want to release shape copy once we're done
array->triggerAllocationFlag(false, true);
}
for (int e = 0; e < numIArgs; e++)
iiArgs[e] = iArgs[e];
for (int e = 0; e < numTArgs; e++)
ttArgs[e] = tArgs[e];
// hypothetically at this point we have everything filled
auto result = op->execute(inputs, outputs, ttArgs, iiArgs, isInplace);
//auto result = op->execute(inputs, ttArgs, iiArgs, isInplace);
if (!isInplace)
for (int e = 0; e < numOutputs; e++) {
//shape::printShapeInfoLinear("JVM output shape", (int *) outputShapes[e]);
//shape::printShapeInfoLinear("C++ output shape", (int *) outputs[e]->shapeInfo());
//outputs[e]->printIndexedBuffer("C++ raw output");
//outputs[e]->printBuffer("C++ indexed output");
if (outputs[e]->ordering() != shape::order(reinterpret_cast<Nd4jLong *>(outputShapes[e])))
outputs[e]->streamline(shape::order(reinterpret_cast<Nd4jLong *>(outputShapes[e])));
}
/*
if (!isInplace) {
if (result->size() != numOutputs) {
return ND4J_STATUS_BAD_OUTPUT;
}
for (int e = 0; e < numOutputs; e++) {
auto buffer = (T *) outputBuffers[e];
auto shape = (int *) outputShapes[e];
nd4j::NDArray<T> tmp(buffer, shape);
if (tmp.lengthOf() != result->at(e)->lengthOf()) {
nd4j_printf("Provided output array for [%s] has length of %i, but actual result has length of %i\n", op->getOpName()->c_str(), tmp.lengthOf(), result->at(e)->lengthOf());
return ND4J_STATUS_BAD_OUTPUT;
}
tmp.assign(result->at(e));
}
} else {
// if op is inplace, our ResultSet holds pointers
result->purge();
}
delete result;
*/
for (auto v: inputs)
delete v;
for (auto v: outputs)
delete v;
return Status::OK();
}
int NativeOps::execCustomOpFloat(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, float* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool isInplace) {
auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationFloat(hash);
return realExec<float>(op, extraPointers, hash, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs, tArgs, numTArgs, iArgs, numIArgs, isInplace);
}
int NativeOps::execCustomOpDouble(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool isInplace) {
auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationDouble(hash);
return realExec<double>(op, extraPointers, hash, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs, tArgs, numTArgs, iArgs, numIArgs, isInplace);
}
int NativeOps::execCustomOpHalf(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, float16* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool isInplace) {
auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationHalf(hash);
return realExec<float16>(op, extraPointers, hash, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs, tArgs, numTArgs, iArgs, numIArgs, isInplace);
}
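// Hypothetical call sketch for the custom-op entry points above (names, counts
// and values are placeholders):
//
//   float tArgs[] = {0.5f};
//   Nd4jLong iArgs[] = {1};
//   int status = nativeOps.execCustomOpFloat(extras, opHash,
//           inBufs, inShapes, 2, outBufs, outShapes, 1,
//           tArgs, 1, iArgs, 1, /*isInplace=*/false);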
int NativeOps::registerGraphFloat(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer flatBufferPointer) {
auto graph = nd4j::graph::GraphExecutioner<float>::importFromFlatPointer(flatBufferPointer);
nd4j::graph::GraphHolder::getInstance()->registerGraph(graphId, graph);
return ND4J_STATUS_OK;
}
int NativeOps::registerGraphDouble(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer flatBufferPointer) {
auto graph = nd4j::graph::GraphExecutioner<double>::importFromFlatPointer(flatBufferPointer);
nd4j::graph::GraphHolder::getInstance()->registerGraph(graphId, graph);
return ND4J_STATUS_OK;
}
int NativeOps::registerGraphHalf(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer flatBufferPointer) {
auto graph = nd4j::graph::GraphExecutioner<float16>::importFromFlatPointer(flatBufferPointer);
nd4j::graph::GraphHolder::getInstance()->registerGraph(graphId, graph);
return ND4J_STATUS_OK;
}
template <typename T>
static VariablesSet<T>* executeStoredGraphT(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) {
auto graph = nd4j::graph::GraphHolder::getInstance()->pullGraph<T>(graphId);
auto varSpace = graph->getVariableSpace()->clone();
std::vector<nd4j::NDArray<T> *> handles;
for (int e = 0; e < numInputs; e++) {
auto idx = inputIndices[e];
// we'll delete this array later, together with the cloned VariableSpace
auto array = new nd4j::NDArray<T>(reinterpret_cast<T *>(inputBuffers[e]), reinterpret_cast<Nd4jLong *>(inputShapes[e]));
handles.emplace_back(array);
if (varSpace->hasVariable(idx)) {
auto var = varSpace->getVariable(idx);
if (var->hasNDArray())
delete var->getNDArray();
var->setNDArray(array);
} else
varSpace->putVariable(idx, array);
}
auto result = nd4j::graph::GraphExecutioner<T>::execute(graph, varSpace);
auto varSet = new nd4j::graph::VariablesSet<T>(result);
if (result == ND4J_STATUS_OK) {
// pull back results, and provide them
auto outputs = graph->fetchOutputs();
for (int e = 0; e < outputs->size(); e++) {
// we're only getting the variable ID/Index from the original graph; values will be taken from the cloned workspace
std::pair<int, int> varId(outputs->at(e)->id(), outputs->at(e)->index());
auto var = varSpace->getVariable(varId);
varSet->push_back(var->clone());
}
delete outputs;
}
delete varSpace;
return varSet;
}
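// Sketch of the register/execute/unregister flow these helpers implement
// (identifiers are illustrative):
//
//   nativeOps.registerGraphFloat(extras, graphId, flatBufferPtr);
//   auto results = nativeOps.executeStoredGraphFloat(extras, graphId,
//           inputBuffers, inputShapes, inputIndices, numInputs);
//   // ... consume the returned variables ...
//   nativeOps.deleteVariablesSetFloat(reinterpret_cast<Nd4jPointer>(results));
//   nativeOps.unregisterGraph(extras, graphId);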
VariablesSet<float>* NativeOps::executeStoredGraphFloat(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) {
return executeStoredGraphT<float>(extraPointers, graphId, inputBuffers, inputShapes, inputIndices, numInputs);
}
VariablesSet<float16>* NativeOps::executeStoredGraphHalf(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) {
return executeStoredGraphT<float16>(extraPointers, graphId, inputBuffers, inputShapes, inputIndices, numInputs);
}
VariablesSet<double>* NativeOps::executeStoredGraphDouble(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) {
return executeStoredGraphT<double>(extraPointers, graphId, inputBuffers, inputShapes, inputIndices, numInputs);
}
int NativeOps::unregisterGraph(Nd4jPointer *extraPointers, Nd4jLong graphId) {
nd4j::graph::GraphHolder::getInstance()->dropGraphAny(graphId);
return ND4J_STATUS_OK;
}
void NativeOps::deletePointerArray(Nd4jPointer pointer) {
Nd4jPointer *ptr = reinterpret_cast<Nd4jPointer *>(pointer);
delete[] ptr;
}
void NativeOps::deleteIntArray(Nd4jPointer pointer) {
auto ptr = reinterpret_cast<int *>(pointer);
delete[] ptr;
}
void NativeOps::deleteLongArray(Nd4jPointer pointer) {
auto ptr = reinterpret_cast<Nd4jLong *>(pointer);
delete[] ptr;
}
template <typename T>
static void deleteVariablesSetT(Nd4jPointer pointer) {
nd4j::graph::VariablesSet<T>* ptr = reinterpret_cast<nd4j::graph::VariablesSet<T>*>(pointer);
delete ptr;
}
void NativeOps::deleteVariablesSetFloat(Nd4jPointer pointer) {
deleteVariablesSetT<float>(pointer);
}
void NativeOps::deleteVariablesSetHalf(Nd4jPointer pointer) {
deleteVariablesSetT<float16>(pointer);
}
void NativeOps::deleteVariablesSetDouble(Nd4jPointer pointer) {
deleteVariablesSetT<double>(pointer);
}
void NativeOps::deleteShapeList(Nd4jPointer shapeList) {
nd4j::ShapeList* list = reinterpret_cast<nd4j::ShapeList*>(shapeList);
list->destroy();
delete list;
}
const char* NativeOps::getAllOperations() {
return nd4j::OpTracker::getInstance()->exportOperations();
}
Nd4jPointer NativeOps::getGraphStateHalf(Nd4jLong id) {
return (Nd4jPointer) new nd4j::graph::GraphState<float16>(id);
}
Nd4jPointer NativeOps::getGraphStateFloat(Nd4jLong id) {
return (Nd4jPointer) new nd4j::graph::GraphState<float>(id);
}
Nd4jPointer NativeOps::getGraphStateDouble(Nd4jLong id) {
return (Nd4jPointer) new nd4j::graph::GraphState<double>(id);
}
void NativeOps::deleteGraphStateHalf(Nd4jPointer state) {
auto stateP = reinterpret_cast<nd4j::graph::GraphState<float16> *>(state);
delete stateP;
}
void NativeOps::deleteGraphStateFloat(Nd4jPointer state) {
auto stateP = reinterpret_cast<nd4j::graph::GraphState<float> *>(state);
delete stateP;
}
void NativeOps::deleteGraphStateDouble(Nd4jPointer state) {
auto stateP = reinterpret_cast<nd4j::graph::GraphState<double> *>(state);
delete stateP;
}
template <typename T>
Nd4jStatus execCustomOpWithScope(Nd4jPointer *extraPointers, nd4j::graph::GraphState<T> *state, Nd4jLong opHash, Nd4jLong *scopes, int numScopes, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int numInputs, Nd4jPointer *outputBuffers, Nd4jPointer *outputShapes, int numOutputs) {
/**
* That's basically exec, with VariableSpace provided in GraphState:
* depending on the operation (i.e. While or If), different logic executors could be used
*/
auto graph = state->graph();
auto varSpace = state->variableSpace();
// the Node is created dynamically, and carries nothing beyond its inputs and outputs;
// this node has an id of 0, and its inputs are mapped below
nd4j::graph::Node<T> node(OpType_LOGIC, opHash, 0);
// mapping inputs
for (int e = 0; e < numInputs; e++) {
auto buffer = reinterpret_cast<T *>(inputBuffers[e]);
auto shapeInfo = reinterpret_cast<Nd4jLong *>(inputShapes[e]);
auto array = new nd4j::NDArray<T>(buffer, shapeInfo, varSpace->workspace());
// now we just put array to VarSpace
varSpace->putVariable(0, e, array);
node.pickInput(0, e);
}
// mapping scopes
for (int e = 0; e < numScopes; e++) {
// we should check scope existence in GraphState/Graph
int scopeId = (int) scopes[e];
if (!state->hasScope(scopeId)) {
nd4j_printf("execCustomOpWithScope: referenced scope [%i] doesn't exist\n", scopeId);
return Status::THROW();
}
node.pickInput(scopeId, 0);
}
auto result = LogicExecutor<T>::processNode(graph, &node);
if (result != Status::OK())
return result;
// mapping outputs
for (int e = 0; e < numOutputs; e++) {
auto buffer = reinterpret_cast<T *>(outputBuffers[e]);
auto shapeInfo = reinterpret_cast<Nd4jLong *>(outputShapes[e]);
nd4j::NDArray<T> array(buffer, shapeInfo, varSpace->workspace());
// copy the result stored in VarSpace under the same ID back into the caller-provided buffer
//varSpace->putVariable(0, e, array);
auto t = varSpace->getVariable(0, e)->getNDArray();
array.assign(t);
}
// removing input variables
for (int e = 0; e < numInputs; e++) {
varSpace->dropVariable(0, e);
}
// at this point we have a Graph and a Node for the current op
return Status::OK();
}
Nd4jStatus NativeOps::execCustomOpWithScopeHalf(Nd4jPointer *extraPointers, Nd4jPointer state, Nd4jLong opHash, Nd4jLong *scopes, int numScopes, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int numInputs, Nd4jPointer *outputBuffers, Nd4jPointer *outputShapes, int numOutputs) {
return execCustomOpWithScope<float16>(extraPointers, reinterpret_cast<nd4j::graph::GraphState<float16> *>(state), opHash, scopes, numScopes, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs);
}
Nd4jStatus NativeOps::execCustomOpWithScopeFloat(Nd4jPointer *extraPointers, Nd4jPointer state, Nd4jLong opHash, Nd4jLong *scopes, int numScopes, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int numInputs, Nd4jPointer *outputBuffers, Nd4jPointer *outputShapes, int numOutputs) {
return execCustomOpWithScope<float>(extraPointers, reinterpret_cast<nd4j::graph::GraphState<float> *>(state), opHash, scopes, numScopes, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs);
}
Nd4jStatus NativeOps::execCustomOpWithScopeDouble(Nd4jPointer *extraPointers, Nd4jPointer state, Nd4jLong opHash, Nd4jLong *scopes, int numScopes, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int numInputs, Nd4jPointer *outputBuffers, Nd4jPointer *outputShapes, int numOutputs) {
return execCustomOpWithScope<double>(extraPointers, reinterpret_cast<nd4j::graph::GraphState<double> *>(state), opHash, scopes, numScopes, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs);
}
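/**
 * Hedged usage sketch (added for illustration; not part of the original source).
 * It shows how the typed wrappers above are expected to be driven for a single-input,
 * single-output scoped op. The op hash and the scope ids (condition/body) are
 * assumptions: in practice they come from the frontend bindings, and the scopes
 * must already be registered on the GraphState before this call.
 */
static Nd4jStatus exampleRunScopedOp(Nd4jPointer state, Nd4jLong opHash,
float *x, Nd4jLong *xShapeInfo,
float *z, Nd4jLong *zShapeInfo) {
Nd4jLong scopes[] = {1, 2}; // hypothetical ids: scope 1 = condition, scope 2 = body
Nd4jPointer inputBuffers[] = {(Nd4jPointer) x};
Nd4jPointer inputShapes[] = {(Nd4jPointer) xShapeInfo};
Nd4jPointer outputBuffers[] = {(Nd4jPointer) z};
Nd4jPointer outputShapes[] = {(Nd4jPointer) zShapeInfo};
return execCustomOpWithScope<float>(nullptr, reinterpret_cast<nd4j::graph::GraphState<float> *>(state), opHash, scopes, 2, inputBuffers, inputShapes, 1, outputBuffers, outputShapes, 1);
}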
void NativeOps::deleteResultWrapper(Nd4jPointer ptr) {
// reinterpret the opaque pointer and release the wrapper
auto p = reinterpret_cast<nd4j::graph::ResultWrapper *>(ptr);
delete p;
}
/*
* TypeDef:
* void convertTypes(Nd4jPointer *extras, int srcType, Nd4jPointer x, Nd4jLong N, int dstType, Nd4jPointer z);
*/
void NativeOps::convertTypes(Nd4jPointer *extras, int srcType, Nd4jPointer x, Nd4jLong N, int dstType, Nd4jPointer z) {
auto dx = reinterpret_cast<void *>(x);
auto dz = reinterpret_cast<void *>(z);
if (srcType == ND4J_FLOAT8) {
if (dstType == ND4J_FLOAT8) {
// convertKernel<double, nd4j::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
nd4j::TypeCast::convertGeneric<nd4j::float8, nd4j::int8>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
nd4j::TypeCast::convertGeneric<nd4j::float8, nd4j::uint8>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
nd4j::TypeCast::convertGeneric<nd4j::float8, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
nd4j::TypeCast::convertGeneric<nd4j::float8, nd4j::int16>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
nd4j::TypeCast::convertGeneric<nd4j::float8, nd4j::uint16>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
// TODO: eventually we might want to add it
} else if (dstType == ND4J_FLOAT32) {
nd4j::TypeCast::convertGeneric<nd4j::float8, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
nd4j::TypeCast::convertGeneric<nd4j::float8, double>(extras, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_INT8) {
if (dstType == ND4J_FLOAT8) {
nd4j::TypeCast::convertGeneric<nd4j::int8, nd4j::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
//convertKernel<nd4j::int8, nd4j::int8>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
nd4j::TypeCast::convertGeneric<nd4j::int8, nd4j::uint8>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
nd4j::TypeCast::convertGeneric<nd4j::int8, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
nd4j::TypeCast::convertGeneric<nd4j::int8, nd4j::int16>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
nd4j::TypeCast::convertGeneric<nd4j::int8, nd4j::uint16>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
// TODO: eventually we might want to add it
} else if (dstType == ND4J_FLOAT32) {
nd4j::TypeCast::convertGeneric<nd4j::int8, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
nd4j::TypeCast::convertGeneric<nd4j::int8, double>(extras, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_UINT8) {
if (dstType == ND4J_FLOAT8) {
nd4j::TypeCast::convertGeneric<nd4j::uint8, nd4j::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
nd4j::TypeCast::convertGeneric<nd4j::uint8, nd4j::int8>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
nd4j::TypeCast::convertGeneric<nd4j::uint8, nd4j::uint8>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
nd4j::TypeCast::convertGeneric<nd4j::uint8, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
nd4j::TypeCast::convertGeneric<nd4j::uint8, nd4j::int16>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
nd4j::TypeCast::convertGeneric<nd4j::uint8, nd4j::uint16>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
// TODO: still might want to add
} else if (dstType == ND4J_FLOAT32) {
nd4j::TypeCast::convertGeneric<nd4j::uint8, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
nd4j::TypeCast::convertGeneric<nd4j::uint8, double>(extras, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_FLOAT16) {
if (dstType == ND4J_FLOAT8) {
nd4j::TypeCast::convertGeneric<float16, nd4j::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
nd4j::TypeCast::convertGeneric<float16, nd4j::int8>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
nd4j::TypeCast::convertGeneric<float16, nd4j::uint8>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
nd4j::TypeCast::convertGeneric<float16, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
nd4j::TypeCast::convertGeneric<float16, nd4j::int16>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
nd4j::TypeCast::convertGeneric<float16, nd4j::uint16>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
// TODO: eventually we might want to add it
} else if (dstType == ND4J_FLOAT32) {
nd4j::TypeCast::convertGeneric<float16, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
nd4j::TypeCast::convertGeneric<float16, double>(extras, dx, N, dz);
} else if (dstType == ND4J_THRESHOLD) {
//nd4j::convertToThreshold<float16>(nullptr, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_INT16) {
if (dstType == ND4J_FLOAT8) {
nd4j::TypeCast::convertGeneric<nd4j::int16, nd4j::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
nd4j::TypeCast::convertGeneric<nd4j::int16, nd4j::int8>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
nd4j::TypeCast::convertGeneric<nd4j::int16, nd4j::uint8>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
nd4j::TypeCast::convertGeneric<nd4j::int16, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
nd4j::TypeCast::convertGeneric<nd4j::int16, nd4j::int16>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
nd4j::TypeCast::convertGeneric<nd4j::int16, nd4j::uint16>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
// TODO: eventually we might want to add it
} else if (dstType == ND4J_FLOAT32) {
nd4j::TypeCast::convertGeneric<nd4j::int16, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
nd4j::TypeCast::convertGeneric<nd4j::int16, double>(extras, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_FLOAT24) {
// TODO: float24 conversions are not implemented yet
} else if (srcType == ND4J_FLOAT32) {
if (dstType == ND4J_FLOAT8) {
nd4j::TypeCast::convertGeneric<float, nd4j::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
nd4j::TypeCast::convertGeneric<float, nd4j::int8>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
nd4j::TypeCast::convertGeneric<float, nd4j::uint8>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
nd4j::TypeCast::convertGeneric<float, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
nd4j::TypeCast::convertGeneric<float, nd4j::int16>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
nd4j::TypeCast::convertGeneric<float, nd4j::uint16>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
// TODO: eventually we might want to add it
} else if (dstType == ND4J_DOUBLE) {
nd4j::TypeCast::convertGeneric<float, double>(extras, dx, N, dz);
} else if (dstType == ND4J_THRESHOLD) {
//nd4j::convertToThreshold<float>(nullptr, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_DOUBLE) {
if (dstType == ND4J_FLOAT8) {
nd4j::TypeCast::convertGeneric<double, nd4j::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
nd4j::TypeCast::convertGeneric<double, nd4j::int8>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
nd4j::TypeCast::convertGeneric<double, nd4j::uint8>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
nd4j::TypeCast::convertGeneric<double, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
nd4j::TypeCast::convertGeneric<double, nd4j::int16>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
nd4j::TypeCast::convertGeneric<double, nd4j::uint16>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
// TODO: eventually we might want to add it
} else if (dstType == ND4J_FLOAT32) {
nd4j::TypeCast::convertGeneric<double, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
// same type: nothing to do
} else if (dstType == ND4J_THRESHOLD) {
//nd4j::convertToThreshold<double>(nullptr, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_THRESHOLD) {
if (dstType == ND4J_FLOAT16) {
//nd4j::convertFromThreshold<float16>(nullptr, dx, N, dz);
} else if (dstType == ND4J_FLOAT32) {
//nd4j::convertFromThreshold<float>(nullptr, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
//nd4j::convertFromThreshold<double>(nullptr, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
}
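/**
 * Hedged usage sketch (added for illustration; not part of the original source).
 * Down-casts N float values into a pre-allocated float16 buffer through the
 * dispatcher above. Passing nullptr for extras is an assumption; on the CUDA
 * backend this array normally carries the launch context.
 */
static void exampleConvertFloatToHalf(NativeOps &ops, float *src, float16 *dst, Nd4jLong N) {
// src holds N source values; dst must already hold room for N float16 values
ops.convertTypes(nullptr, ND4J_FLOAT32, (Nd4jPointer) src, N, ND4J_FLOAT16, (Nd4jPointer) dst);
}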
|
dfe02b6bdab4e048b94d236b82149093ea49839f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "j2d5pt-512-16-128_kernel.hu"
__device__ float __sbref_wrap(float *sb, size_t index) { return sb[index]; }
__global__ void kernel0_16(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 16;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 480;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
float __reg_7_0;
float __reg_7_1;
float __reg_7_2;
float __reg_8_0;
float __reg_8_1;
float __reg_8_2;
float __reg_9_0;
float __reg_9_1;
float __reg_9_2;
float __reg_10_0;
float __reg_10_1;
float __reg_10_2;
float __reg_11_0;
float __reg_11_1;
float __reg_11_2;
float __reg_12_0;
float __reg_12_1;
float __reg_12_2;
float __reg_13_0;
float __reg_13_1;
float __reg_13_2;
float __reg_14_0;
float __reg_14_1;
float __reg_14_2;
float __reg_15_0;
float __reg_15_1;
float __reg_15_2;
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10);
const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11);
const AN5D_TYPE __writeValid12 = __updateValid && __local_c2 >= (__halo2 * 12) && __local_c2 < __side2LenOl - (__halo2 * 12);
const AN5D_TYPE __writeValid13 = __updateValid && __local_c2 >= (__halo2 * 13) && __local_c2 < __side2LenOl - (__halo2 * 13);
const AN5D_TYPE __writeValid14 = __updateValid && __local_c2 >= (__halo2 * 14) && __local_c2 < __side2LenOl - (__halo2 * 14);
const AN5D_TYPE __writeValid15 = __updateValid && __local_c2 >= (__halo2 * 15) && __local_c2 < __side2LenOl - (__halo2 * 15);
const AN5D_TYPE __writeValid16 = __updateValid && __local_c2 >= (__halo2 * 16) && __local_c2 < __side2LenOl - (__halo2 * 16);
const AN5D_TYPE __storeValid = __writeValid16;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC10(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid10) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC11(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid11) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC12(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid12) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC13(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid13) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC14(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid14) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC15(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid15) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
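// Added commentary (not emitted by hipify/AN5D): the macros above implement a 16-deep
// temporally blocked pipeline for this 2D 5-point stencil. __CALCEXPR computes
// out = (5.1f*top + 12.1f*left + 15.0f*center + 12.2f*right + 5.2f*bottom) / 118,
// where the left/right neighbors are read from the double-buffered shared-memory row
// __b_sb (swapped by __DB_SWITCH on every step) and top/center/bottom are streamed
// through the __reg_<stage>_<phase> register rotation. A row reaches __DEST only after
// passing through all 16 time steps, i.e. when __storeValid (== __writeValid16) holds.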
if (__c1Id == 0)
{
__LOAD(__reg_15_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_15_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_15_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_15_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_15_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_15_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_15_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_15_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_15_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_15_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_15_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_15_0, __reg_10_1, __reg_10_2);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_15_0, __reg_11_1, __reg_11_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_15_0, __reg_12_1, __reg_12_2);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_15_0, __reg_13_1, __reg_13_2);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC15(__reg_15_1, __reg_15_0, __reg_14_1, __reg_14_2);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0);
__STORE(1, __reg_15_0, __reg_15_1, __reg_15_2);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1);
__STORE(2, __reg_15_1, __reg_15_2, __reg_15_0);
__LOAD(__reg_0_1, 19);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2);
__STORE(3, __reg_15_2, __reg_15_0, __reg_15_1);
__LOAD(__reg_0_2, 20);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0);
__STORE(4, __reg_15_0, __reg_15_1, __reg_15_2);
__LOAD(__reg_0_0, 21);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1);
__STORE(5, __reg_15_1, __reg_15_2, __reg_15_0);
__LOAD(__reg_0_1, 22);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2);
__STORE(6, __reg_15_2, __reg_15_0, __reg_15_1);
__LOAD(__reg_0_2, 23);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0);
__STORE(7, __reg_15_0, __reg_15_1, __reg_15_2);
__LOAD(__reg_0_0, 24);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1);
__STORE(8, __reg_15_1, __reg_15_2, __reg_15_0);
__LOAD(__reg_0_1, 25);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2);
__STORE(9, __reg_15_2, __reg_15_0, __reg_15_1);
__LOAD(__reg_0_2, 26);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0);
__STORE(10, __reg_15_0, __reg_15_1, __reg_15_2);
__LOAD(__reg_0_0, 27);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1);
__STORE(11, __reg_15_1, __reg_15_2, __reg_15_0);
__LOAD(__reg_0_1, 28);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2);
__STORE(12, __reg_15_2, __reg_15_0, __reg_15_1);
__LOAD(__reg_0_2, 29);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0);
__STORE(13, __reg_15_0, __reg_15_1, __reg_15_2);
__LOAD(__reg_0_0, 30);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1);
__STORE(14, __reg_15_1, __reg_15_2, __reg_15_0);
__LOAD(__reg_0_1, 31);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2);
__STORE(15, __reg_15_2, __reg_15_0, __reg_15_1);
__LOAD(__reg_0_2, 32);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0);
__STORE(16, __reg_15_0, __reg_15_1, __reg_15_2);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_1, 19);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 20);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_0, 21);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__LOAD(__reg_0_1, 22);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__LOAD(__reg_0_2, 23);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__LOAD(__reg_0_0, 24);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__LOAD(__reg_0_1, 25);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__LOAD(__reg_0_2, 26);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__LOAD(__reg_0_0, 27);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__LOAD(__reg_0_1, 28);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__LOAD(__reg_0_2, 29);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__LOAD(__reg_0_0, 30);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1);
__LOAD(__reg_0_1, 31);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2);
__LOAD(__reg_0_2, 32);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0);
__STORE(16, __reg_15_0, __reg_15_1, __reg_15_2);
}
__b_sb = __b_sb_double + __blockSize * 0;
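// Added commentary: after the priming code above, the steady-state loop below streams
// one new input row per __LOAD and retires one fully updated row per __STORE. Stores
// trail loads by 16 rows (__h - 16), matching the pipeline depth, and the trailing
// branches drain the remaining in-flight rows at the block boundary.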
if (__c1Id == __side1Num - 1)
{
for (__h = 33; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1);
__STORE(__h - 16, __reg_15_1, __reg_15_2, __reg_15_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2);
__STORE(__h - 16, __reg_15_2, __reg_15_0, __reg_15_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0);
__STORE(__h - 16, __reg_15_0, __reg_15_1, __reg_15_2);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1);
__STORE(__h - 16, __reg_15_1, __reg_15_2, __reg_15_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2);
__STORE(__h - 15, __reg_15_2, __reg_15_0, __reg_15_1);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0);
__STORE(__h - 14, __reg_15_0, __reg_15_1, __reg_15_2);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1);
__STORE(__h - 13, __reg_15_1, __reg_15_2, __reg_15_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2);
__STORE(__h - 12, __reg_15_2, __reg_15_0, __reg_15_1);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0);
__STORE(__h - 11, __reg_15_0, __reg_15_1, __reg_15_2);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1);
__STORE(__h - 10, __reg_15_1, __reg_15_2, __reg_15_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2);
__STORE(__h - 9, __reg_15_2, __reg_15_0, __reg_15_1);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_0_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0);
__STORE(__h - 8, __reg_15_0, __reg_15_1, __reg_15_2);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_0_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1);
__STORE(__h - 7, __reg_15_1, __reg_15_2, __reg_15_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_0_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2);
__STORE(__h - 6, __reg_15_2, __reg_15_0, __reg_15_1);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_0_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0);
__STORE(__h - 5, __reg_15_0, __reg_15_1, __reg_15_2);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_0_2);
__CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1);
__STORE(__h - 4, __reg_15_1, __reg_15_2, __reg_15_0);
__CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_0_2);
__STORE(__h - 3, __reg_15_2, __reg_15_0, __reg_15_1);
__STORE(__h - 2, __reg_15_0, __reg_15_1, __reg_0_2);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1);
__STORE(__h - 16, __reg_15_1, __reg_15_2, __reg_15_0);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2);
__STORE(__h - 15, __reg_15_2, __reg_15_0, __reg_15_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0);
__STORE(__h - 14, __reg_15_0, __reg_15_1, __reg_15_2);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1);
__STORE(__h - 13, __reg_15_1, __reg_15_2, __reg_15_0);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2);
__STORE(__h - 12, __reg_15_2, __reg_15_0, __reg_15_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0);
__STORE(__h - 11, __reg_15_0, __reg_15_1, __reg_15_2);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1);
__STORE(__h - 10, __reg_15_1, __reg_15_2, __reg_15_0);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2);
__STORE(__h - 9, __reg_15_2, __reg_15_0, __reg_15_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0);
__STORE(__h - 8, __reg_15_0, __reg_15_1, __reg_15_2);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_0_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1);
__STORE(__h - 7, __reg_15_1, __reg_15_2, __reg_15_0);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_0_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2);
__STORE(__h - 6, __reg_15_2, __reg_15_0, __reg_15_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_0_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0);
__STORE(__h - 5, __reg_15_0, __reg_15_1, __reg_15_2);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_0_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1);
__STORE(__h - 4, __reg_15_1, __reg_15_2, __reg_15_0);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_0_0);
__CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2);
__STORE(__h - 3, __reg_15_2, __reg_15_0, __reg_15_1);
__CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_0_0);
__STORE(__h - 2, __reg_15_0, __reg_15_1, __reg_15_2);
__STORE(__h - 1, __reg_15_1, __reg_15_2, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1);
__STORE(__h - 16, __reg_15_1, __reg_15_2, __reg_15_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2);
__STORE(__h - 15, __reg_15_2, __reg_15_0, __reg_15_1);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0);
__STORE(__h - 14, __reg_15_0, __reg_15_1, __reg_15_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1);
__STORE(__h - 13, __reg_15_1, __reg_15_2, __reg_15_0);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2);
__STORE(__h - 12, __reg_15_2, __reg_15_0, __reg_15_1);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0);
__STORE(__h - 11, __reg_15_0, __reg_15_1, __reg_15_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1);
__STORE(__h - 10, __reg_15_1, __reg_15_2, __reg_15_0);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2);
__STORE(__h - 9, __reg_15_2, __reg_15_0, __reg_15_1);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0);
__STORE(__h - 8, __reg_15_0, __reg_15_1, __reg_15_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1);
__STORE(__h - 7, __reg_15_1, __reg_15_2, __reg_15_0);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_0_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2);
__STORE(__h - 6, __reg_15_2, __reg_15_0, __reg_15_1);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_0_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0);
__STORE(__h - 5, __reg_15_0, __reg_15_1, __reg_15_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_0_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1);
__STORE(__h - 4, __reg_15_1, __reg_15_2, __reg_15_0);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_0_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2);
__STORE(__h - 3, __reg_15_2, __reg_15_0, __reg_15_1);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_0_1);
__CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0);
__STORE(__h - 2, __reg_15_0, __reg_15_1, __reg_15_2);
__CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_0_1);
__STORE(__h - 1, __reg_15_1, __reg_15_2, __reg_15_0);
__STORE(__h + 0, __reg_15_2, __reg_15_0, __reg_0_1);
}
}
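/* Tiles that stop short of the bottom edge: no boundary flush is needed;
   the unroll-by-3 loop below simply keeps the 16-stage pipeline full. */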
else
{
for (__h = 33; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1);
__STORE(__h - 16, __reg_15_1, __reg_15_2, __reg_15_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2);
__STORE(__h - 16, __reg_15_2, __reg_15_0, __reg_15_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0);
__STORE(__h - 16, __reg_15_0, __reg_15_1, __reg_15_2);
__h++;
}
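/* Drain: at most two leftover rows after the unroll-by-3 loop; each early
   return fires once __h reaches the overlapped height __side1LenOl. */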
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1);
__STORE(__h - 16, __reg_15_1, __reg_15_2, __reg_15_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2);
__STORE(__h - 16, __reg_15_2, __reg_15_0, __reg_15_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0);
__STORE(__h - 16, __reg_15_0, __reg_15_1, __reg_15_2);
__h++;
}
}
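/* kernel0_15: 5-point weighted stencil (see __CALCEXPR below), fusing 15
   time steps per launch (__side0Len = 15). Each thread block owns a
   128 x 482 tile of the c1/c2 plane plus a 15-deep halo on every side, and
   streams c1 rows through a 15-stage register pipeline (__CALC1..__CALC14
   plus the final __STORE).

   A minimal host-side launch sketch (assumed, not part of the generated
   file; d_A, dimsize, timestep, c0 mirror the kernel parameters):

     unsigned side1Num = ((dimsize - 2) + 128 - 1) / 128;
     unsigned side2Num = ((dimsize - 2) + 482 - 1) / 482;
     dim3 grid(side1Num * side2Num, 1, 1);
     dim3 block(512, 1, 1);   // __side2LenOl = 482 + 2 * 15
     kernel0_15<<<grid, block>>>(d_A, dimsize, timestep, c0);
*/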
__global__ void kernel0_15(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 15;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 482;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
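  /* Register pipeline state: __reg_k_{0,1,2} is a rotating three-row window
     holding the k-th intermediate time level; the roles rotate as __h
     advances, so no register-to-register copies are needed. */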
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
float __reg_7_0;
float __reg_7_1;
float __reg_7_2;
float __reg_8_0;
float __reg_8_1;
float __reg_8_2;
float __reg_9_0;
float __reg_9_1;
float __reg_9_2;
float __reg_10_0;
float __reg_10_1;
float __reg_10_2;
float __reg_11_0;
float __reg_11_1;
float __reg_11_2;
float __reg_12_0;
float __reg_12_1;
float __reg_12_2;
float __reg_13_0;
float __reg_13_1;
float __reg_13_2;
float __reg_14_0;
float __reg_14_1;
float __reg_14_2;
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
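  /* Double-buffered shared row: each __CALCSETUP (defined below) publishes
     the centre row so __SBREF can fetch the c2 - 1 and c2 + 1 neighbours,
     and __DB_SWITCH flips halves so a stage never overwrites a row a
     neighbouring thread may still be reading. */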
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10);
const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11);
const AN5D_TYPE __writeValid12 = __updateValid && __local_c2 >= (__halo2 * 12) && __local_c2 < __side2LenOl - (__halo2 * 12);
const AN5D_TYPE __writeValid13 = __updateValid && __local_c2 >= (__halo2 * 13) && __local_c2 < __side2LenOl - (__halo2 * 13);
const AN5D_TYPE __writeValid14 = __updateValid && __local_c2 >= (__halo2 * 14) && __local_c2 < __side2LenOl - (__halo2 * 14);
const AN5D_TYPE __writeValid15 = __updateValid && __local_c2 >= (__halo2 * 15) && __local_c2 < __side2LenOl - (__halo2 * 15);
const AN5D_TYPE __storeValid = __writeValid15;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC10(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid10) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC11(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid11) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC12(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid12) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC13(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid13) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC14(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid14) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
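/* Pipeline semantics: __CALCk advances a value one time level when its lane
   guard __writeValidk holds, otherwise passes the centre row through
   unchanged; __STORE applies the 15th and final update, writing into the
   destination plane selected by (c0 + 1) % 2. Per __CALCEXPR the update is
   (5.1*A[c1-1][c2] + 12.1*A[c1][c2-1] + 15.0*A[c1][c2]
    + 12.2*A[c1][c2+1] + 5.2*A[c1+1][c2]) / 118. */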
if (__c1Id == 0)
{
__LOAD(__reg_14_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_14_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_14_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_14_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_14_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_14_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_14_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_14_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_14_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_14_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_14_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_14_0, __reg_10_1, __reg_10_2);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_14_0, __reg_11_1, __reg_11_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_14_0, __reg_12_1, __reg_12_2);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_14_0, __reg_13_1, __reg_13_2);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__STORE(1, __reg_14_0, __reg_14_1, __reg_14_2);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__STORE(2, __reg_14_1, __reg_14_2, __reg_14_0);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__STORE(3, __reg_14_2, __reg_14_0, __reg_14_1);
__LOAD(__reg_0_1, 19);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__STORE(4, __reg_14_0, __reg_14_1, __reg_14_2);
__LOAD(__reg_0_2, 20);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__STORE(5, __reg_14_1, __reg_14_2, __reg_14_0);
__LOAD(__reg_0_0, 21);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__STORE(6, __reg_14_2, __reg_14_0, __reg_14_1);
__LOAD(__reg_0_1, 22);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__STORE(7, __reg_14_0, __reg_14_1, __reg_14_2);
__LOAD(__reg_0_2, 23);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__STORE(8, __reg_14_1, __reg_14_2, __reg_14_0);
__LOAD(__reg_0_0, 24);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__STORE(9, __reg_14_2, __reg_14_0, __reg_14_1);
__LOAD(__reg_0_1, 25);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__STORE(10, __reg_14_0, __reg_14_1, __reg_14_2);
__LOAD(__reg_0_2, 26);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__STORE(11, __reg_14_1, __reg_14_2, __reg_14_0);
__LOAD(__reg_0_0, 27);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__STORE(12, __reg_14_2, __reg_14_0, __reg_14_1);
__LOAD(__reg_0_1, 28);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__STORE(13, __reg_14_0, __reg_14_1, __reg_14_2);
__LOAD(__reg_0_2, 29);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__STORE(14, __reg_14_1, __reg_14_2, __reg_14_0);
__LOAD(__reg_0_0, 30);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__STORE(15, __reg_14_2, __reg_14_0, __reg_14_1);
}
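/* Blocks that do not own the top rows (__c1Id != 0): the pipeline is filled
   from the overlapped rows above the tile, so no special first-row value is
   threaded through the stages as in the __c1Id == 0 branch above. */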
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_1, 19);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 20);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_0, 21);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__LOAD(__reg_0_1, 22);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__LOAD(__reg_0_2, 23);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__LOAD(__reg_0_0, 24);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__LOAD(__reg_0_1, 25);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__LOAD(__reg_0_2, 26);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__LOAD(__reg_0_0, 27);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__LOAD(__reg_0_1, 28);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__LOAD(__reg_0_2, 29);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__LOAD(__reg_0_0, 30);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__STORE(15, __reg_14_2, __reg_14_0, __reg_14_1);
__DB_SWITCH(); __syncthreads();
}
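// Reset the shared-memory pointer to the first buffer half, then enter the
// steady-state phase: each iteration of the loops below consumes three input
// rows (one per register-rotation phase) and emits three output rows, each
// trailing its input by 15 rows.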
__b_sb = __b_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
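// Last tile along c1: the trip count is derived from the rows actually
// remaining in the problem domain rather than the full overlapped tile
// length, and the epilogue after the loop drains the partially filled
// pipeline.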
for (__h = 31; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__STORE(__h - 15, __reg_14_0, __reg_14_1, __reg_14_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__STORE(__h - 15, __reg_14_1, __reg_14_2, __reg_14_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__STORE(__h - 15, __reg_14_2, __reg_14_0, __reg_14_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
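// Epilogue: depending on how many rows remain (0, 1 or 2 past __h), the last
// loaded row appears to be re-fed as the "below" neighbour into successively
// deeper stages, flushing the pipeline and storing this tile's final rows.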
if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__STORE(__h - 15, __reg_14_0, __reg_14_1, __reg_14_2);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__STORE(__h - 14, __reg_14_1, __reg_14_2, __reg_14_0);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__STORE(__h - 13, __reg_14_2, __reg_14_0, __reg_14_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__STORE(__h - 12, __reg_14_0, __reg_14_1, __reg_14_2);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__STORE(__h - 11, __reg_14_1, __reg_14_2, __reg_14_0);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__STORE(__h - 10, __reg_14_2, __reg_14_0, __reg_14_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__STORE(__h - 9, __reg_14_0, __reg_14_1, __reg_14_2);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__STORE(__h - 8, __reg_14_1, __reg_14_2, __reg_14_0);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_0_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__STORE(__h - 7, __reg_14_2, __reg_14_0, __reg_14_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_0_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__STORE(__h - 6, __reg_14_0, __reg_14_1, __reg_14_2);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_0_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__STORE(__h - 5, __reg_14_1, __reg_14_2, __reg_14_0);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_0_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__STORE(__h - 4, __reg_14_2, __reg_14_0, __reg_14_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_0_0);
__STORE(__h - 3, __reg_14_0, __reg_14_1, __reg_14_2);
__STORE(__h - 2, __reg_14_1, __reg_14_2, __reg_0_0);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__STORE(__h - 15, __reg_14_0, __reg_14_1, __reg_14_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__STORE(__h - 14, __reg_14_1, __reg_14_2, __reg_14_0);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__STORE(__h - 13, __reg_14_2, __reg_14_0, __reg_14_1);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__STORE(__h - 12, __reg_14_0, __reg_14_1, __reg_14_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__STORE(__h - 11, __reg_14_1, __reg_14_2, __reg_14_0);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__STORE(__h - 10, __reg_14_2, __reg_14_0, __reg_14_1);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__STORE(__h - 9, __reg_14_0, __reg_14_1, __reg_14_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__STORE(__h - 8, __reg_14_1, __reg_14_2, __reg_14_0);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__STORE(__h - 7, __reg_14_2, __reg_14_0, __reg_14_1);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_0_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__STORE(__h - 6, __reg_14_0, __reg_14_1, __reg_14_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_0_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__STORE(__h - 5, __reg_14_1, __reg_14_2, __reg_14_0);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_0_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__STORE(__h - 4, __reg_14_2, __reg_14_0, __reg_14_1);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_0_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__STORE(__h - 3, __reg_14_0, __reg_14_1, __reg_14_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_0_1);
__STORE(__h - 2, __reg_14_1, __reg_14_2, __reg_14_0);
__STORE(__h - 1, __reg_14_2, __reg_14_0, __reg_0_1);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__STORE(__h - 15, __reg_14_0, __reg_14_1, __reg_14_2);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__STORE(__h - 14, __reg_14_1, __reg_14_2, __reg_14_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__STORE(__h - 13, __reg_14_2, __reg_14_0, __reg_14_1);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__STORE(__h - 12, __reg_14_0, __reg_14_1, __reg_14_2);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__STORE(__h - 11, __reg_14_1, __reg_14_2, __reg_14_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__STORE(__h - 10, __reg_14_2, __reg_14_0, __reg_14_1);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__STORE(__h - 9, __reg_14_0, __reg_14_1, __reg_14_2);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__STORE(__h - 8, __reg_14_1, __reg_14_2, __reg_14_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__STORE(__h - 7, __reg_14_2, __reg_14_0, __reg_14_1);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__STORE(__h - 6, __reg_14_0, __reg_14_1, __reg_14_2);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_0_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__STORE(__h - 5, __reg_14_1, __reg_14_2, __reg_14_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_0_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__STORE(__h - 4, __reg_14_2, __reg_14_0, __reg_14_1);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_0_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__STORE(__h - 3, __reg_14_0, __reg_14_1, __reg_14_2);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_0_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__STORE(__h - 2, __reg_14_1, __reg_14_2, __reg_14_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_0_2);
__STORE(__h - 1, __reg_14_2, __reg_14_0, __reg_14_1);
__STORE(__h + 0, __reg_14_0, __reg_14_1, __reg_0_2);
}
}
else
{
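// Interior tiles: run over the full overlapped tile length in steps of
// three rows per iteration; leftover rows are handled after the loop.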
for (__h = 31; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__STORE(__h - 15, __reg_14_0, __reg_14_1, __reg_14_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__STORE(__h - 15, __reg_14_1, __reg_14_2, __reg_14_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__STORE(__h - 15, __reg_14_2, __reg_14_0, __reg_14_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
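// Tail: since the loop advances __h by 3, at most two rows remain here;
// each block below bails out as soon as __side1LenOl rows are consumed.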
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__STORE(__h - 15, __reg_14_0, __reg_14_1, __reg_14_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__STORE(__h - 15, __reg_14_1, __reg_14_2, __reg_14_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__STORE(__h - 15, __reg_14_2, __reg_14_0, __reg_14_1);
__h++;
}
}
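// kernel0_14: same AN5D-style temporally blocked stencil as the kernels
// above, but fusing 14 time steps per sweep (__side0Len = 14) instead of 15.
// A is laid out as two dimsize x dimsize planes; the (c0 % 2) plane is read
// and the ((c0 + 1) % 2) plane is written (see __LOAD / __DEST below).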
__global__ void kernel0_14(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 14;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 484;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
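// Each pipeline stage k keeps a rotating triple of registers
// (__reg_k_0, __reg_k_1, __reg_k_2) holding rows c1-1, c1, c1+1 of its
// intermediate time level; rotating the roles avoids register copies as
// rows advance through the pipeline.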
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
float __reg_7_0;
float __reg_7_1;
float __reg_7_2;
float __reg_8_0;
float __reg_8_1;
float __reg_8_2;
float __reg_9_0;
float __reg_9_1;
float __reg_9_2;
float __reg_10_0;
float __reg_10_1;
float __reg_10_2;
float __reg_11_0;
float __reg_11_1;
float __reg_11_2;
float __reg_12_0;
float __reg_12_1;
float __reg_12_2;
float __reg_13_0;
float __reg_13_1;
float __reg_13_2;
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
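// Validity predicates: __loadValid guards global loads including the c2
// halo; each __writeValidK shrinks the usable interior by one halo column
// per fused time step, so only threads with 14 valid neighbours on each
// side may store (__storeValid == __writeValid14).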
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10);
const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11);
const AN5D_TYPE __writeValid12 = __updateValid && __local_c2 >= (__halo2 * 12) && __local_c2 < __side2LenOl - (__halo2 * 12);
const AN5D_TYPE __writeValid13 = __updateValid && __local_c2 >= (__halo2 * 13) && __local_c2 < __side2LenOl - (__halo2 * 13);
const AN5D_TYPE __writeValid14 = __updateValid && __local_c2 >= (__halo2 * 14) && __local_c2 < __side2LenOl - (__halo2 * 14);
const AN5D_TYPE __storeValid = __writeValid14;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
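// The stencil itself (see __CALCEXPR): a 5-point weighted average,
// out = (5.1*top + 12.1*left + 15.0*centre + 12.2*right + 5.2*bottom) / 118,
// where left/right are read from shared memory and top/centre/bottom from
// the register triple. __DB_SWITCH flips between the two shared-memory
// halves so one half can be read while the next row is being staged.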
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC10(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid10) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC11(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid11) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC12(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid12) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC13(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid13) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
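// First tile along c1: row 0 is the physical boundary. It is kept in
// __reg_13_0 and re-fed as the top input when each successive stage is
// primed, which appears to hold the boundary row fixed across all 14
// fused time steps.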
__LOAD(__reg_13_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_13_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_13_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_13_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_13_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_13_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_13_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_13_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_13_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_13_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_13_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_13_0, __reg_10_1, __reg_10_2);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_13_0, __reg_11_1, __reg_11_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_13_0, __reg_12_1, __reg_12_2);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__STORE(1, __reg_13_0, __reg_13_1, __reg_13_2);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__STORE(2, __reg_13_1, __reg_13_2, __reg_13_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__STORE(3, __reg_13_2, __reg_13_0, __reg_13_1);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__STORE(4, __reg_13_0, __reg_13_1, __reg_13_2);
__LOAD(__reg_0_1, 19);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__STORE(5, __reg_13_1, __reg_13_2, __reg_13_0);
__LOAD(__reg_0_2, 20);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__STORE(6, __reg_13_2, __reg_13_0, __reg_13_1);
__LOAD(__reg_0_0, 21);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__STORE(7, __reg_13_0, __reg_13_1, __reg_13_2);
__LOAD(__reg_0_1, 22);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__STORE(8, __reg_13_1, __reg_13_2, __reg_13_0);
__LOAD(__reg_0_2, 23);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__STORE(9, __reg_13_2, __reg_13_0, __reg_13_1);
__LOAD(__reg_0_0, 24);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__STORE(10, __reg_13_0, __reg_13_1, __reg_13_2);
__LOAD(__reg_0_1, 25);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__STORE(11, __reg_13_1, __reg_13_2, __reg_13_0);
__LOAD(__reg_0_2, 26);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__STORE(12, __reg_13_2, __reg_13_0, __reg_13_1);
__LOAD(__reg_0_0, 27);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__STORE(13, __reg_13_0, __reg_13_1, __reg_13_2);
__LOAD(__reg_0_1, 28);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__STORE(14, __reg_13_1, __reg_13_2, __reg_13_0);
}
else
{
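/* Blocks other than the first along c1: load rows 0..28 to prime all 14
   pipeline stages, but store only row 14 -- the rows above it belong to
   the neighbouring tile and are recomputed redundantly (overlapped
   tiling), which is why no inter-block synchronisation appears here. */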
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_1, 19);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 20);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_0, 21);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__LOAD(__reg_0_1, 22);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__LOAD(__reg_0_2, 23);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__LOAD(__reg_0_0, 24);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__LOAD(__reg_0_1, 25);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__LOAD(__reg_0_2, 26);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__LOAD(__reg_0_0, 27);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__LOAD(__reg_0_1, 28);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__STORE(14, __reg_13_1, __reg_13_2, __reg_13_0);
__DB_SWITCH(); __syncthreads();
}
__b_sb = __b_sb_double + __blockSize * 1;
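/* Steady state: each loop iteration loads three rows and the register
   files rotate with period 3.  The last block along c1 additionally has
   to drain the pipeline against the bottom boundary. */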
if (__c1Id == __side1Num - 1)
{
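/* A result becomes final 14 rows after its input row is loaded, hence
   __STORE(__h - 14, ...). */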
for (__h = 29; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__STORE(__h - 14, __reg_13_2, __reg_13_0, __reg_13_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__STORE(__h - 14, __reg_13_0, __reg_13_1, __reg_13_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__STORE(__h - 14, __reg_13_1, __reg_13_2, __reg_13_0);
__h++;
}
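/* Pipeline drain: 0, 1 or 2 rows of this tile remain.  The last loaded
   row is the fixed bottom boundary; it is reused as the lower neighbour
   while each still-pending stage is flushed. */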
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__STORE(__h - 14, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__STORE(__h - 13, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__STORE(__h - 12, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__STORE(__h - 11, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__STORE(__h - 10, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__STORE(__h - 9, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__STORE(__h - 8, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__STORE(__h - 7, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_0_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__STORE(__h - 6, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_0_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__STORE(__h - 5, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_0_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__STORE(__h - 4, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_0_1);
__STORE(__h - 3, __reg_13_1, __reg_13_2, __reg_13_0);
__STORE(__h - 2, __reg_13_2, __reg_13_0, __reg_0_1);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__STORE(__h - 14, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__STORE(__h - 13, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__STORE(__h - 12, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__STORE(__h - 11, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__STORE(__h - 10, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__STORE(__h - 9, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__STORE(__h - 8, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__STORE(__h - 7, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__STORE(__h - 6, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_0_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__STORE(__h - 5, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_0_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__STORE(__h - 4, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_0_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__STORE(__h - 3, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_0_2);
__STORE(__h - 2, __reg_13_2, __reg_13_0, __reg_13_1);
__STORE(__h - 1, __reg_13_0, __reg_13_1, __reg_0_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__STORE(__h - 14, __reg_13_2, __reg_13_0, __reg_13_1);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__STORE(__h - 13, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__STORE(__h - 12, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__STORE(__h - 11, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__STORE(__h - 10, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__STORE(__h - 9, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__STORE(__h - 8, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__STORE(__h - 7, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__STORE(__h - 6, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__STORE(__h - 5, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_0_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__STORE(__h - 4, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_0_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__STORE(__h - 3, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_0_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__STORE(__h - 2, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_0_0);
__STORE(__h - 1, __reg_13_0, __reg_13_1, __reg_13_2);
__STORE(__h + 0, __reg_13_1, __reg_13_2, __reg_0_0);
}
}
else
{
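/* Interior blocks need no boundary handling: run the same 3-row loop to
   the end of the overlapped extent __side1LenOl. */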
for (__h = 29; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__STORE(__h - 14, __reg_13_2, __reg_13_0, __reg_13_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__STORE(__h - 14, __reg_13_0, __reg_13_1, __reg_13_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__STORE(__h - 14, __reg_13_1, __reg_13_2, __reg_13_0);
__h++;
}
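/* Up to three trailing rows, each guarded so the block returns exactly
   at the overlapped tile boundary. */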
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__STORE(__h - 14, __reg_13_2, __reg_13_0, __reg_13_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__STORE(__h - 14, __reg_13_0, __reg_13_1, __reg_13_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__STORE(__h - 14, __reg_13_1, __reg_13_2, __reg_13_0);
__h++;
}
}
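/* Variant fusing 13 time steps per sweep (__side0Len = 13): one pipeline
   stage fewer than the 14-step kernel above, otherwise the same
   load/compute/store structure. */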
__global__ void kernel0_13(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 13;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 486;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
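/* Each thread owns one c2 column; blockIdx.x enumerates the (c1 tile,
   c2 tile) pairs and the kernel streams along c1. */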
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
float __reg_7_0;
float __reg_7_1;
float __reg_7_2;
float __reg_8_0;
float __reg_8_1;
float __reg_8_2;
float __reg_9_0;
float __reg_9_1;
float __reg_9_2;
float __reg_10_0;
float __reg_10_1;
float __reg_10_2;
float __reg_11_0;
float __reg_11_1;
float __reg_11_2;
float __reg_12_0;
float __reg_12_1;
float __reg_12_2;
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
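/* Double-buffered shared row: every stage publishes its centre value so
   neighbouring threads can read the +/-1 taps in c2 with a single
   __syncthreads per stage. */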
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10);
const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11);
const AN5D_TYPE __writeValid12 = __updateValid && __local_c2 >= (__halo2 * 12) && __local_c2 < __side2LenOl - (__halo2 * 12);
const AN5D_TYPE __writeValid13 = __updateValid && __local_c2 >= (__halo2 * 13) && __local_c2 < __side2LenOl - (__halo2 * 13);
const AN5D_TYPE __storeValid = __writeValid13;
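/* Each fused step consumes one halo cell per side in c2, so a thread may
   be valid for only the first k stages (__writeValidk); it stores a
   final value only if it survives all 13. */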
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
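/* Weighted 5-point cross over the three consecutive c1 rows (__a, __b,
   __c) and the two c2 neighbours of __b read from shared memory. */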
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC10(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid10) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC11(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid11) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC12(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid12) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
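/* __CALCk applies stage k where the thread's halo is still valid and
   otherwise passes the centre value through unchanged; __STORE evaluates
   the final stage straight into the (c0 + 1) time plane of A. */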
if (__c1Id == 0)
{
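/* First block along c1: row 0 is the fixed top boundary.  It stays in
   __reg_12_0 and serves as the upper neighbour for the first row of
   every stage while the pipeline fills. */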
__LOAD(__reg_12_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_12_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_12_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_12_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_12_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_12_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_12_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_12_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_12_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_12_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_12_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_12_0, __reg_10_1, __reg_10_2);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_12_0, __reg_11_1, __reg_11_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(1, __reg_12_0, __reg_12_1, __reg_12_2);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(2, __reg_12_1, __reg_12_2, __reg_12_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(3, __reg_12_2, __reg_12_0, __reg_12_1);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(4, __reg_12_0, __reg_12_1, __reg_12_2);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(5, __reg_12_1, __reg_12_2, __reg_12_0);
__LOAD(__reg_0_1, 19);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(6, __reg_12_2, __reg_12_0, __reg_12_1);
__LOAD(__reg_0_2, 20);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(7, __reg_12_0, __reg_12_1, __reg_12_2);
__LOAD(__reg_0_0, 21);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(8, __reg_12_1, __reg_12_2, __reg_12_0);
__LOAD(__reg_0_1, 22);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(9, __reg_12_2, __reg_12_0, __reg_12_1);
__LOAD(__reg_0_2, 23);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(10, __reg_12_0, __reg_12_1, __reg_12_2);
__LOAD(__reg_0_0, 24);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(11, __reg_12_1, __reg_12_2, __reg_12_0);
__LOAD(__reg_0_1, 25);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(12, __reg_12_2, __reg_12_0, __reg_12_1);
__LOAD(__reg_0_2, 26);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(13, __reg_12_0, __reg_12_1, __reg_12_2);
}
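	/* Interior c1 blocks (__c1Id > 0): no top boundary to clamp against, so
	   this prologue only primes the register pipelines on rows 0..26 and
	   issues the first store once all thirteen stages hold valid data. */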
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_1, 19);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 20);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_0, 21);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__LOAD(__reg_0_1, 22);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__LOAD(__reg_0_2, 23);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__LOAD(__reg_0_0, 24);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__LOAD(__reg_0_1, 25);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__LOAD(__reg_0_2, 26);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(13, __reg_12_0, __reg_12_1, __reg_12_2);
}
__b_sb = __b_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 27; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 13, __reg_12_2, __reg_12_0, __reg_12_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 13, __reg_12_0, __reg_12_1, __reg_12_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
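    /* Drain for the last c1 block: one branch per possible remainder
       (__h + k == __c1Len - __side1Len * __c1Id + __halo1 * 2, k = 0..2).
       The "if (0) {}" is a generator artifact that keeps the else-if chain
       uniform; each branch reuses the last loaded register as the bottom
       halo while the remaining __CALC/__STORE stages flush. */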
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 12, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 11, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 10, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 9, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 8, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 7, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 6, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_0_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 5, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_0_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 4, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_0_2);
__STORE(__h - 3, __reg_12_2, __reg_12_0, __reg_12_1);
__STORE(__h - 2, __reg_12_0, __reg_12_1, __reg_0_2);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 12, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 11, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 10, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 9, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 8, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 7, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 6, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 5, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_0_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 4, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_0_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 3, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_0_0);
__STORE(__h - 2, __reg_12_0, __reg_12_1, __reg_12_2);
__STORE(__h - 1, __reg_12_1, __reg_12_2, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 12, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 11, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 10, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 9, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 8, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 7, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 6, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 5, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 4, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_0_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 3, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_0_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 2, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_0_1);
__STORE(__h - 1, __reg_12_1, __reg_12_2, __reg_12_0);
__STORE(__h + 0, __reg_12_2, __reg_12_0, __reg_0_1);
}
}
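  /* c1 blocks that do not own the bottom boundary stream the full
     overlapped tile instead; the early returns below cut the pipeline
     off as soon as __h reaches __side1LenOl. */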
else
{
for (__h = 27; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 13, __reg_12_2, __reg_12_0, __reg_12_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 13, __reg_12_0, __reg_12_1, __reg_12_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 13, __reg_12_2, __reg_12_0, __reg_12_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 13, __reg_12_0, __reg_12_1, __reg_12_2);
__h++;
}
}
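/*
 * kernel0_12: the same 5-point stencil, fused over 12 time steps per
 * launch (__side0Len = 12). Each __CALCk stage advances one time level;
 * the writable interior shrinks by one halo column per fused step, which
 * is what the __writeValid1..12 predicates below encode.
 *
 * A minimal host-side launch sketch (an illustration under the constants
 * below, not part of the generated code):
 *
 *   // dim3 block(512, 1);  // must cover __side2LenOl = 488 + 2 * 12
 *   // unsigned side1Num = (dimsize - 2 + 128 - 1) / 128;
 *   // unsigned side2Num = (dimsize - 2 + 488 - 1) / 488;
 *   // kernel0_12<<<side1Num * side2Num, block>>>(A, dimsize, timestep, c0);
 */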
__global__ void kernel0_12(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 12;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
float __reg_7_0;
float __reg_7_1;
float __reg_7_2;
float __reg_8_0;
float __reg_8_1;
float __reg_8_2;
float __reg_9_0;
float __reg_9_1;
float __reg_9_2;
float __reg_10_0;
float __reg_10_1;
float __reg_10_2;
float __reg_11_0;
float __reg_11_1;
float __reg_11_2;
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10);
const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11);
const AN5D_TYPE __writeValid12 = __updateValid && __local_c2 >= (__halo2 * 12) && __local_c2 < __side2LenOl - (__halo2 * 12);
const AN5D_TYPE __storeValid = __writeValid12;
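    /* Each fused step consumes one halo column on either side, so a thread
       may write time level k only if its column sits at least k * __halo2
       inside the overlapped tile; only __writeValid12 columns contribute to
       the final store. */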
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC10(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid10) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC11(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid11) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
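    /* Pipeline mechanics: __CALCSETUP double-buffers the middle row into the
       shared line buffer __b_sb, __CALCEXPR applies the weighted 5-point
       average (vertical neighbors in registers, horizontal via __b_sb), and
       threads inside the k-th halo ring pass their value through unchanged
       (out = reg1). __STORE performs the final, 12th step and writes into
       the (c0 + 1) time plane of A. */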
if (__c1Id == 0)
{
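      /* Top boundary block: row 0 is parked in __reg_11_0 and reused as the
         top halo for the first row of every stage (see the __CALCk calls
         that take __reg_11_0 as their first argument). */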
__LOAD(__reg_11_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_11_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_11_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_11_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_11_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_11_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_11_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_11_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_11_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_11_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_11_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_11_0, __reg_10_1, __reg_10_2);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(1, __reg_11_0, __reg_11_1, __reg_11_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(2, __reg_11_1, __reg_11_2, __reg_11_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(3, __reg_11_2, __reg_11_0, __reg_11_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(4, __reg_11_0, __reg_11_1, __reg_11_2);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(5, __reg_11_1, __reg_11_2, __reg_11_0);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(6, __reg_11_2, __reg_11_0, __reg_11_1);
__LOAD(__reg_0_1, 19);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(7, __reg_11_0, __reg_11_1, __reg_11_2);
__LOAD(__reg_0_2, 20);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(8, __reg_11_1, __reg_11_2, __reg_11_0);
__LOAD(__reg_0_0, 21);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(9, __reg_11_2, __reg_11_0, __reg_11_1);
__LOAD(__reg_0_1, 22);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(10, __reg_11_0, __reg_11_1, __reg_11_2);
__LOAD(__reg_0_2, 23);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(11, __reg_11_1, __reg_11_2, __reg_11_0);
__LOAD(__reg_0_0, 24);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(12, __reg_11_2, __reg_11_0, __reg_11_1);
}
else
{
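      /* Interior c1 blocks: plain priming prologue with no top clamp; a new
         __CALC stage comes online every second loaded row until all twelve
         are active. */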
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_1, 19);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 20);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_0, 21);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__LOAD(__reg_0_1, 22);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__LOAD(__reg_0_2, 23);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__LOAD(__reg_0_0, 24);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(12, __reg_11_2, __reg_11_0, __reg_11_1);
}
__b_sb = __b_sb_double + __blockSize * 0;
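/* Steady state: the last block along dim 1 streams rows up to the tile's
   true end (the __c1Len-based bound) and then drains the pipeline against
   the bottom boundary; all other blocks stream a fixed __side1LenOl rows. */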
if (__c1Id == __side1Num - 1)
{
for (__h = 25; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 12, __reg_11_1, __reg_11_2, __reg_11_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(__h - 12, __reg_11_2, __reg_11_0, __reg_11_1);
__h++;
}
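/* Generated epilogue dispatch: `if (0) {}` only anchors the chain. The
   branch taken depends on how many rows (0, 1, or 2) remain beyond the
   3-row-unrolled loop; each branch flushes the pipeline by reusing the
   last loaded row as the fixed bottom halo. */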
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 11, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(__h - 10, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 9, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 8, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(__h - 7, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 6, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 5, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_0_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(__h - 4, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_0_0);
__STORE(__h - 3, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 2, __reg_11_1, __reg_11_2, __reg_0_0);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 11, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(__h - 10, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 9, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 8, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(__h - 7, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 6, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 5, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(__h - 4, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_0_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 3, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_0_1);
__STORE(__h - 2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 1, __reg_11_2, __reg_11_0, __reg_0_1);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 11, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(__h - 10, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 9, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 8, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(__h - 7, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 6, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 5, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(__h - 4, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 3, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_0_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_0_2);
__STORE(__h - 1, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h + 0, __reg_11_0, __reg_11_1, __reg_0_2);
}
}
else
{
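/* Non-edge blocks: the same 3-row-unrolled streaming loop, plus up to two
   tail iterations guarded by `if (__h == __side1LenOl) return;` so every
   block consumes exactly __side1LenOl input rows. */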
for (__h = 25; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 12, __reg_11_1, __reg_11_2, __reg_11_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(__h - 12, __reg_11_2, __reg_11_0, __reg_11_1);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 12, __reg_11_1, __reg_11_2, __reg_11_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(__h - 12, __reg_11_2, __reg_11_0, __reg_11_1);
__h++;
}
}
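/* kernel0_11: the same 2D 5-point stencil, but with 11 time steps fused per
   launch (__side0Len = 11), presumably emitted for residual timestep counts.
   Its structure mirrors the preceding kernel, which carries one more
   pipeline stage. */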
__global__ void kernel0_11(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
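  /* Tile shape: 11 fused time steps (__side0Len), 128 rows streamed per
     block along dim 1, 490 columns per block along dim 2; each side of the
     tile is padded by __side0Len * halo overlap cells (__OlLen1/__OlLen2). */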
const AN5D_TYPE __side0Len = 11;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 490;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
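  /* Per-stage rotating windows: pipeline stage t keeps its last three rows
     in __reg_t_{0,1,2}, rotated by register name rather than by copying. */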
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
float __reg_7_0;
float __reg_7_1;
float __reg_7_2;
float __reg_8_0;
float __reg_8_1;
float __reg_8_2;
float __reg_9_0;
float __reg_9_1;
float __reg_9_2;
float __reg_10_0;
float __reg_10_1;
float __reg_10_2;
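  /* Double-buffered shared-memory row: holds the current middle row so each
     thread can read its +/-1 neighbors along dim 2 via __SBREF; the two
     buffers alternate through __DB_SWITCH() inside __CALCSETUP. */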
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
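  /* Each pipeline stage consumes one halo column on each side, so stage k
     may compute only where __writeValid k holds; the deepest stage gates the
     global store (__storeValid = __writeValid11). */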
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10);
const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11);
const AN5D_TYPE __storeValid = __writeValid11;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
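  /* Pipeline macros: __LOAD reads one input row, __CALCk advances stage k
     (or forwards the middle value outside that stage's valid region), and
     __STORE runs the final stage and writes output row h. A[] is
     double-buffered along c0, i.e. indexed by timestep parity. */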
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC10(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid10) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
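/* First block along dim 1: prime the pipeline from row 0, feeding the row-0
   value (__reg_10_0) in as the fixed top halo of every stage; the first
   output row is stored only once all intermediate stages are warm. */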
if (__c1Id == 0)
{
__LOAD(__reg_10_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_10_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_10_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_10_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_10_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_10_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_10_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_10_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_10_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_10_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_10_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(1, __reg_10_0, __reg_10_1, __reg_10_2);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(2, __reg_10_1, __reg_10_2, __reg_10_0);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(3, __reg_10_2, __reg_10_0, __reg_10_1);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(4, __reg_10_0, __reg_10_1, __reg_10_2);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(5, __reg_10_1, __reg_10_2, __reg_10_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(6, __reg_10_2, __reg_10_0, __reg_10_1);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(7, __reg_10_0, __reg_10_1, __reg_10_2);
__LOAD(__reg_0_1, 19);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(8, __reg_10_1, __reg_10_2, __reg_10_0);
__LOAD(__reg_0_2, 20);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(9, __reg_10_2, __reg_10_0, __reg_10_1);
__LOAD(__reg_0_0, 21);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(10, __reg_10_0, __reg_10_1, __reg_10_2);
__LOAD(__reg_0_1, 22);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(11, __reg_10_1, __reg_10_2, __reg_10_0);
}
else
{
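/* Non-first blocks: prime from the block's own overlap rows; stages outside
   their valid region forward values until the pipeline is warm. */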
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_1, 19);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 20);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_0, 21);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__LOAD(__reg_0_1, 22);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(11, __reg_10_1, __reg_10_2, __reg_10_0);
__DB_SWITCH(); __syncthreads();
}
__b_sb = __b_sb_double + __blockSize * 0;
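/* Steady state / drain, as in the kernel above: the last block along dim 1
   streams to the true end of the tile, then flushes the pipeline against
   the bottom boundary. */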
if (__c1Id == __side1Num - 1)
{
for (__h = 23; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h - 11, __reg_10_0, __reg_10_1, __reg_10_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(__h - 11, __reg_10_1, __reg_10_2, __reg_10_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
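/* Epilogue dispatch for the 0/1/2 leftover rows; each branch drains the
   11-stage pipeline by reusing the last loaded row as the bottom halo. */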
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h - 10, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(__h - 9, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 8, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h - 7, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(__h - 6, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 5, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h - 4, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_0_1);
__STORE(__h - 3, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 2, __reg_10_2, __reg_10_0, __reg_0_1);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h - 10, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(__h - 9, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 8, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h - 7, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(__h - 6, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 5, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h - 4, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(__h - 3, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_0_2);
__STORE(__h - 2, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 1, __reg_10_0, __reg_10_1, __reg_0_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h - 10, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(__h - 9, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 8, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h - 7, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(__h - 6, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 5, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h - 4, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(__h - 3, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 2, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_0_0);
__STORE(__h - 1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(__h + 0, __reg_10_1, __reg_10_2, __reg_0_0);
}
}
else
{
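// Steady state for interior blocks: each trip loads three rows and stores the
// three rows eleven positions behind the load front (the pipeline depth of
// this 11-step variant), then flips the shared-memory buffer once.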
for (__h = 23; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h - 11, __reg_10_0, __reg_10_1, __reg_10_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(__h - 11, __reg_10_1, __reg_10_2, __reg_10_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
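// Remainder: fewer than three rows are left in the overlapped extent; finish
// them one at a time, returning as soon as the extended range is exhausted.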
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h - 11, __reg_10_0, __reg_10_1, __reg_10_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(__h - 11, __reg_10_1, __reg_10_2, __reg_10_0);
__h++;
}
}
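/* kernel0_10: one variant in a family of auto-generated, temporally blocked
 * stencil kernels (the AN5D_TYPE guard below suggests an AN5D-style code
 * generator). It fuses __side0Len = 10 time steps of a 2D 5-point star
 * stencil over a 128 x 492 tile. Each fused step is one stage of a register
 * pipeline; rows stream through the stages while a double-buffered
 * shared-memory line supplies the left/right neighbors. Because every fused
 * step consumes one halo row/column, neighboring tiles overlap by
 * __side0Len rows/columns on each side (__OlLen1 / __OlLen2). */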
__global__ void kernel0_10(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 10;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
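// Three registers per stage (__reg_k_0/1/2) hold three consecutive rows at
// time-depth k; the _0/_1/_2 suffixes rotate as new rows arrive, which is why
// the unrolled code below cycles through three symmetric variants.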
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
float __reg_7_0;
float __reg_7_1;
float __reg_7_2;
float __reg_8_0;
float __reg_8_1;
float __reg_8_2;
float __reg_9_0;
float __reg_9_1;
float __reg_9_2;
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10);
const AN5D_TYPE __storeValid = __writeValid10;
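// Each fused time step needs one more halo lane on each side, so the lanes
// that may write at stage k shrink to [k*__halo2, __side2LenOl - k*__halo2);
// only lanes valid through all 10 stages (__writeValid10) store results.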
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
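/* Macro roles (as defined above):
 *   __LOAD(reg, h)  - read row h of the current time plane (__c0 % 2) into a register.
 *   __CALCEXPR      - the 5-point update: (5.1*N + 12.1*W + 15.0*C + 12.2*E + 5.2*S) / 118,
 *                     with W/E fetched from the shared-memory line __b_sb.
 *   __DB_SWITCH     - flip __b_sb between the two halves of __b_sb_double so each
 *                     stage publishes a fresh line without racing the previous one.
 *   __CALCk         - apply one time step if the lane is inside stage k's valid
 *                     window (__writeValidk); otherwise pass the center through.
 *   __STORE         - apply the final (10th) step and write to the other time plane.
 *
 * Hand-expanded, simplified sketch of one stage call (illustrative only, not
 * generated output; __sbref_wrap, defined elsewhere, mediates the +/-1 access):
 *   __CALC1(out, up, mid, down) ==>
 *     __b_sb = other half of __b_sb_double;   // __DB_SWITCH
 *     __b_sb[__tid] = mid; __syncthreads();   // publish the center row
 *     out = __writeValid1
 *         ? (5.1f*up + 12.1f*__b_sb[__tid-1] + 15.0f*mid
 *            + 12.2f*__b_sb[__tid+1] + 5.2f*down) / 118
 *         : mid;                              // out-of-window lanes pass through
 */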
if (__c1Id == 0)
{
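// Top boundary block: row 0 is loaded once (into __reg_9_0) and, since the
// boundary row is never updated, it is reused as the upper halo while each
// successive pipeline stage is primed.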
__LOAD(__reg_9_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_9_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_9_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_9_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_9_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_9_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_9_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_9_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_9_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_9_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(1, __reg_9_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(2, __reg_9_1, __reg_9_2, __reg_9_0);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(3, __reg_9_2, __reg_9_0, __reg_9_1);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(4, __reg_9_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(5, __reg_9_1, __reg_9_2, __reg_9_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(6, __reg_9_2, __reg_9_0, __reg_9_1);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(7, __reg_9_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(8, __reg_9_1, __reg_9_2, __reg_9_0);
__LOAD(__reg_0_1, 19);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(9, __reg_9_2, __reg_9_0, __reg_9_1);
__LOAD(__reg_0_2, 20);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(10, __reg_9_0, __reg_9_1, __reg_9_2);
}
else
{
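// Interior/bottom blocks: prime the pipeline from the overlapped rows 0..20
// without storing; the first row this block owns is written only once the
// pipeline is full (__STORE(10, ...) below).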
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_1, 19);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 20);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(10, __reg_9_0, __reg_9_1, __reg_9_2);
__DB_SWITCH(); __syncthreads();
}
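// The pipeline is now full. Re-point the tile at the second buffer half so the
// steady-state loops below start from a known double-buffer parity (this
// appears to be why the generator emits an explicit reset here).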
__b_sb = __b_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
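// Last block along c1: stream rows until at most two rows of this block
// remain, then drain the pipeline against the bottom boundary.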
for (__h = 21; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 10, __reg_9_2, __reg_9_0, __reg_9_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 10, __reg_9_0, __reg_9_1, __reg_9_2);
__h++;
}
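// Generator idiom: the empty "if (0) {}" just anchors the else-if chain over
// the three possible remainders (0, 1, or 2 rows left before the boundary).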
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 9, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 8, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 7, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 6, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 5, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 4, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2);
__STORE(__h - 3, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(__h - 2, __reg_9_0, __reg_9_1, __reg_0_2);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 9, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 8, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 7, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 6, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 5, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 4, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 3, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0);
__STORE(__h - 2, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 1, __reg_9_1, __reg_9_2, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 9, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 8, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 7, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 6, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 5, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 4, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 3, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 2, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1);
__STORE(__h - 1, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h + 0, __reg_9_2, __reg_9_0, __reg_0_1);
}
}
else
{
for (__h = 21; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 10, __reg_9_2, __reg_9_0, __reg_9_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 10, __reg_9_0, __reg_9_1, __reg_9_2);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 10, __reg_9_2, __reg_9_0, __reg_9_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 10, __reg_9_0, __reg_9_1, __reg_9_2);
__h++;
}
}
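/* kernel0_9: identical generated structure with 9 fused time steps
 * (__side0Len = 9): a 9-stage pipeline, stores trailing the load front by 9
 * rows, and a slightly wider interior tile (__side2Len = 494) since less
 * overlap is consumed per side. */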
__global__ void kernel0_9(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 9;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 494;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
float __reg_7_0;
float __reg_7_1;
float __reg_7_2;
float __reg_8_0;
float __reg_8_1;
float __reg_8_2;
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __storeValid = __writeValid9;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_8_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_8_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_8_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_8_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_8_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_8_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_8_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_8_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_8_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(1, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(2, __reg_8_1, __reg_8_2, __reg_8_0);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(3, __reg_8_2, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(4, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(5, __reg_8_1, __reg_8_2, __reg_8_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(6, __reg_8_2, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(7, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(8, __reg_8_1, __reg_8_2, __reg_8_0);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(9, __reg_8_2, __reg_8_0, __reg_8_1);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(9, __reg_8_2, __reg_8_0, __reg_8_1);
}
__b_sb = __b_sb_double + __blockSize * 1;
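/* Steady-state phase (a reading of the generated control flow, not an
   authoritative annotation): the last tile along c1 (__c1Id == __side1Num - 1)
   takes the branch below with a drain sequence that flushes the register
   pipeline at the domain boundary, while interior tiles run the plain
   streaming loop in the else-branch and bail out once __h reaches
   __side1LenOl. */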
if (__c1Id == __side1Num - 1)
{
for (__h = 19; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 9, __reg_8_2, __reg_8_0, __reg_8_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 8, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 7, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 6, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 5, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 4, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0);
__STORE(__h - 3, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 2, __reg_8_1, __reg_8_2, __reg_0_0);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 8, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 7, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 6, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 5, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 4, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 3, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1);
__STORE(__h - 2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 1, __reg_8_2, __reg_8_0, __reg_0_1);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 8, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 7, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 6, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 5, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 4, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 3, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2);
__STORE(__h - 1, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h + 0, __reg_8_0, __reg_8_1, __reg_0_2);
}
}
else
{
for (__h = 19; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 9, __reg_8_2, __reg_8_0, __reg_8_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 9, __reg_8_2, __reg_8_0, __reg_8_1);
__h++;
}
}
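/* kernel0_8: AN5D-style time-fused stencil kernel. With __side0Len = 8 it
   advances 8 time steps per sweep through an 8-stage register pipeline
   (__reg_1_* .. __reg_8_*), double-buffering one row of the plane in shared
   memory (__b_sb_double). Each stage applies the same 5-point weighted
   stencil defined by __CALCEXPR below. (Descriptive comment added for
   readability; the code itself is machine-generated.) */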
__global__ void kernel0_8(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
float __reg_7_0;
float __reg_7_1;
float __reg_7_2;
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __storeValid = __writeValid8;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
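/* Macro roles, as defined above: __LOAD pulls one row of A into a register;
   __CALCk runs pipeline stage k, reading the horizontal neighbours from the
   shared-memory row (__SBREF at tid-1 / tid+1) and the vertical neighbours
   from the register triple, guarded by __writeValidk so threads outside the
   shrinking valid band just forward their centre value; __STORE evaluates
   the final stage straight into __DEST. __CALCEXPR is the 5-point stencil
   (5.1*N + 12.1*W + 15.0*C + 12.2*E + 5.2*S) / 118. */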
if (__c1Id == 0)
{
__LOAD(__reg_7_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_7_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_7_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_7_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_7_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_7_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_7_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_7_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(1, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(2, __reg_7_1, __reg_7_2, __reg_7_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(3, __reg_7_2, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(4, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(5, __reg_7_1, __reg_7_2, __reg_7_0);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(6, __reg_7_2, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(7, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(8, __reg_7_1, __reg_7_2, __reg_7_0);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(8, __reg_7_1, __reg_7_2, __reg_7_0);
}
__b_sb = __b_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 17; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1);
__STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_0_1);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2);
__STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_0_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0);
__STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h + 0, __reg_7_1, __reg_7_2, __reg_0_0);
}
}
else
{
for (__h = 17; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0);
__h++;
}
}
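/* kernel0_7: same generated template as kernel0_8, specialized to
   __side0Len = 7 (7 fused time steps, a 7-stage register pipeline) with the
   tile widened to __side2Len = 498; since one fewer halo layer is consumed
   per sweep, __side2LenOl works out to the same 512 threads per block. */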
__global__ void kernel0_7(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 498;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __storeValid = __writeValid7;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_6_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_6_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_6_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_6_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_6_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_6_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_6_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(1, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(2, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(3, __reg_6_2, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(5, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(6, __reg_6_2, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(7, __reg_6_0, __reg_6_1, __reg_6_2);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(7, __reg_6_0, __reg_6_1, __reg_6_2);
__DB_SWITCH(); __syncthreads();
}
__b_sb = __b_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 15; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2);
__STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_0_2);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1);
__STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h + 0, __reg_6_2, __reg_6_0, __reg_0_1);
}
}
else
{
for (__h = 15; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
}
}
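// kernel0_6: generated j2d5pt sweep variant fusing __side0Len = 6 time steps
// per launch. Each fused step costs one extra halo row/column, so the interior
// tile narrows to __side2Len = 500 while the overlapped width stays 512.
// (Explanatory comment added; parameters read from the generated constants below.)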
__global__ void kernel0_6(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __storeValid = __writeValid6;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
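// The macros above implement AN5D's software-pipelined streaming scheme along
// c1: __LOAD pulls one row into a register; __CALCk applies the 5-point
// stencil (weights 5.1/12.1/15.0/12.2/5.2 over the N/W/C/E/S points,
// normalized by 118) for the k-th fused time step, rotating among three
// registers per stage; __STORE applies the final (6th) step directly into the
// output plane. West/east neighbors travel through the double-buffered
// shared-memory row __b_sb, which __DB_SWITCH flips each step.
// (Explanatory comment added; roles inferred from the macro definitions.)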
if (__c1Id == 0)
{
__LOAD(__reg_5_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_5_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_5_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_5_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_5_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_5_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(1, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(2, __reg_5_1, __reg_5_2, __reg_5_0);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(3, __reg_5_2, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(5, __reg_5_1, __reg_5_2, __reg_5_0);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(6, __reg_5_2, __reg_5_0, __reg_5_1);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(6, __reg_5_2, __reg_5_0, __reg_5_1);
__DB_SWITCH(); __syncthreads();
}
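// Realign the shared-memory double buffer after the prologue: the two branches
// above issue different numbers of __CALCSETUP/__DB_SWITCH flips, so the
// active half is reset explicitly before the steady-state loop.
// (Explanatory comment added.)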
__b_sb = __b_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 13; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(__h - 6, __reg_5_1, __reg_5_2, __reg_5_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1);
__h++;
}
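// Pipeline drain for the last c1 tile: depending on how many rows remain
// (__h + 0/1/2 against the tile end), flush the in-flight stages without
// loading past the boundary. (Explanatory comment added.)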
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_0_0);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(__h - 1, __reg_5_2, __reg_5_0, __reg_0_1);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2);
__STORE(__h - 1, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h + 0, __reg_5_0, __reg_5_1, __reg_0_2);
}
}
else
{
for (__h = 13; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(__h - 6, __reg_5_1, __reg_5_2, __reg_5_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(__h - 6, __reg_5_1, __reg_5_2, __reg_5_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1);
__h++;
}
}
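// kernel0_5: same sweep with __side0Len = 5 fused time steps; one less halo
// per side widens the interior tile to __side2Len = 502 (overlapped width
// still 512). (Explanatory comment added.)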
__global__ void kernel0_5(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 502;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __storeValid = __writeValid5;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_4_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_4_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_4_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_4_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_4_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(1, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(3, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(5, __reg_4_1, __reg_4_2, __reg_4_0);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(5, __reg_4_1, __reg_4_2, __reg_4_0);
}
__b_sb = __b_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 11; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_0_1);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(__h - 1, __reg_4_0, __reg_4_1, __reg_0_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__STORE(__h - 1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h + 0, __reg_4_1, __reg_4_2, __reg_0_0);
}
}
else
{
for (__h = 11; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0);
__h++;
}
}
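// kernel0_4: __side0Len = 4 fused time steps, interior tile __side2Len = 504.
// (Explanatory comment added.)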
__global__ void kernel0_4(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __storeValid = __writeValid4;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_3_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_3_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_3_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_3_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(3, __reg_3_2, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(4, __reg_3_0, __reg_3_1, __reg_3_2);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(4, __reg_3_0, __reg_3_1, __reg_3_2);
}
__b_sb = __b_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_0_2);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 1, __reg_3_1, __reg_3_2, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__STORE(__h - 1, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h + 0, __reg_3_2, __reg_3_0, __reg_0_1);
}
}
else
{
for (__h = 9; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
}
}
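// kernel0_3: __side0Len = 3 fused time steps, interior tile __side2Len = 506.
// (Explanatory comment added.)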
__global__ void kernel0_3(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 506;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __storeValid = __writeValid3;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_2_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_2_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(2, __reg_2_1, __reg_2_2, __reg_2_0);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(3, __reg_2_2, __reg_2_0, __reg_2_1);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(3, __reg_2_2, __reg_2_0, __reg_2_1);
__DB_SWITCH(); __syncthreads();
}
__b_sb = __b_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 7; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_0_0);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(__h - 1, __reg_2_2, __reg_2_0, __reg_0_1);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__STORE(__h - 1, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h + 0, __reg_2_0, __reg_2_1, __reg_0_2);
}
}
else
{
for (__h = 7; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1);
__h++;
}
}
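// kernel0_2: __side0Len = 2 fused time steps, interior tile __side2Len = 508.
// (Explanatory comment added.)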
__global__ void kernel0_2(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __storeValid = __writeValid2;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_1_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_1_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__STORE(1, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__STORE(2, __reg_1_1, __reg_1_2, __reg_1_0);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__STORE(2, __reg_1_1, __reg_1_2, __reg_1_0);
__DB_SWITCH(); __syncthreads();
}
__b_sb = __b_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_0_1);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(__h - 1, __reg_1_0, __reg_1_1, __reg_0_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__STORE(__h - 1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h + 0, __reg_1_1, __reg_1_2, __reg_0_0);
}
}
else
{
for (__h = 5; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0);
__h++;
}
}
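// kernel0_1: single-step variant (__side0Len = 1); each sweep is a plain
// Jacobi update with no temporal fusion. It presumably handles leftover time
// steps that do not fill a larger fused kernel; that dispatch role is an
// assumption based on how AN5D host drivers typically use these variants.
// (Explanatory comment added.)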
__global__ void kernel0_1(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 510;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __storeValid = __writeValid1;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
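// Prologue: fill the three-row window with input rows 0..2 and emit output
// row 1. With only one fused step there is no top-boundary clamping to do,
// which is why the two __c1Id branches below are identical.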
if (__c1Id == 0)
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__STORE(1, __reg_0_0, __reg_0_1, __reg_0_2);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__STORE(1, __reg_0_0, __reg_0_1, __reg_0_2);
}
__b_sb = __b_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
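// Last tile along c1: sweep to within 3 rows of the tile end. The loop is
// unrolled by 3 so the three row registers rotate without register copies.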
for (__h = 3; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_0, __h);
__STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0);
__h++;
__LOAD(__reg_0_1, __h);
__STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1);
__h++;
__LOAD(__reg_0_2, __h);
__STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
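// Epilogue: handle the 0, 1, or 2 input rows left over after the
// unrolled-by-3 loop (the `if (0) {}` merely anchors the else-if chain).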
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h + 0, __reg_0_2, __reg_0_0, __reg_0_1);
}
}
else
{
for (__h = 3; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_0, __h);
__STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0);
__h++;
__LOAD(__reg_0_1, __h);
__STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1);
__h++;
__LOAD(__reg_0_2, __h);
__STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
}
}
|
dfe02b6bdab4e048b94d236b82149093ea49839f.cu
|
#include "j2d5pt-512-16-128_kernel.hu"
__device__ float __sbref_wrap(float *sb, size_t index) { return sb[index]; }
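// kernel0_16: 16 fused time steps (__side0Len = 16). Input rows advance
// through a 16-stage register pipeline; __reg_k_* holds a point after k
// stencil applications, and __STORE applies the 16th and writes it out.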
__global__ void kernel0_16(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 16;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 480;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
float __reg_7_0;
float __reg_7_1;
float __reg_7_2;
float __reg_8_0;
float __reg_8_1;
float __reg_8_2;
float __reg_9_0;
float __reg_9_1;
float __reg_9_2;
float __reg_10_0;
float __reg_10_1;
float __reg_10_2;
float __reg_11_0;
float __reg_11_1;
float __reg_11_2;
float __reg_12_0;
float __reg_12_1;
float __reg_12_2;
float __reg_13_0;
float __reg_13_1;
float __reg_13_2;
float __reg_14_0;
float __reg_14_1;
float __reg_14_2;
float __reg_15_0;
float __reg_15_1;
float __reg_15_2;
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10);
const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11);
const AN5D_TYPE __writeValid12 = __updateValid && __local_c2 >= (__halo2 * 12) && __local_c2 < __side2LenOl - (__halo2 * 12);
const AN5D_TYPE __writeValid13 = __updateValid && __local_c2 >= (__halo2 * 13) && __local_c2 < __side2LenOl - (__halo2 * 13);
const AN5D_TYPE __writeValid14 = __updateValid && __local_c2 >= (__halo2 * 14) && __local_c2 < __side2LenOl - (__halo2 * 14);
const AN5D_TYPE __writeValid15 = __updateValid && __local_c2 >= (__halo2 * 15) && __local_c2 < __side2LenOl - (__halo2 * 15);
const AN5D_TYPE __writeValid16 = __updateValid && __local_c2 >= (__halo2 * 16) && __local_c2 < __side2LenOl - (__halo2 * 16);
const AN5D_TYPE __storeValid = __writeValid16;
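// Stage k may only compute for threads at least k*__halo2 columns inside the
// tile; threads outside that band just forward the centre value (the
// 'else out = reg1' arm of __CALCk), which keeps the pipeline stages aligned.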
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC10(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid10) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC11(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid11) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC12(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid12) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC13(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid13) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC14(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid14) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC15(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid15) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
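// Top-boundary tile: row 0 lies outside the updated domain and is constant
// across time steps, so the raw value in __reg_15_0 (loaded once from row 0)
// is reused as the "row above" input at every pipeline stage while the
// pipeline fills; the first store lands at output row 1.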
__LOAD(__reg_15_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_15_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_15_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_15_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_15_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_15_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_15_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_15_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_15_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_15_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_15_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_15_0, __reg_10_1, __reg_10_2);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_15_0, __reg_11_1, __reg_11_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_15_0, __reg_12_1, __reg_12_2);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_15_0, __reg_13_1, __reg_13_2);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC15(__reg_15_1, __reg_15_0, __reg_14_1, __reg_14_2);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0);
__STORE(1, __reg_15_0, __reg_15_1, __reg_15_2);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1);
__STORE(2, __reg_15_1, __reg_15_2, __reg_15_0);
__LOAD(__reg_0_1, 19);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2);
__STORE(3, __reg_15_2, __reg_15_0, __reg_15_1);
__LOAD(__reg_0_2, 20);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0);
__STORE(4, __reg_15_0, __reg_15_1, __reg_15_2);
__LOAD(__reg_0_0, 21);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1);
__STORE(5, __reg_15_1, __reg_15_2, __reg_15_0);
__LOAD(__reg_0_1, 22);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2);
__STORE(6, __reg_15_2, __reg_15_0, __reg_15_1);
__LOAD(__reg_0_2, 23);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0);
__STORE(7, __reg_15_0, __reg_15_1, __reg_15_2);
__LOAD(__reg_0_0, 24);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1);
__STORE(8, __reg_15_1, __reg_15_2, __reg_15_0);
__LOAD(__reg_0_1, 25);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2);
__STORE(9, __reg_15_2, __reg_15_0, __reg_15_1);
__LOAD(__reg_0_2, 26);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0);
__STORE(10, __reg_15_0, __reg_15_1, __reg_15_2);
__LOAD(__reg_0_0, 27);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1);
__STORE(11, __reg_15_1, __reg_15_2, __reg_15_0);
__LOAD(__reg_0_1, 28);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2);
__STORE(12, __reg_15_2, __reg_15_0, __reg_15_1);
__LOAD(__reg_0_2, 29);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0);
__STORE(13, __reg_15_0, __reg_15_1, __reg_15_2);
__LOAD(__reg_0_0, 30);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1);
__STORE(14, __reg_15_1, __reg_15_2, __reg_15_0);
__LOAD(__reg_0_1, 31);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2);
__STORE(15, __reg_15_2, __reg_15_0, __reg_15_1);
__LOAD(__reg_0_2, 32);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0);
__STORE(16, __reg_15_0, __reg_15_1, __reg_15_2);
}
else
{
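// Interior tile: no boundary clamping. Each stage simply starts one row
// deeper, so 33 input rows (0..32) are loaded before the first fully
// propagated output row (row 16) can be stored.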
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_1, 19);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 20);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_0, 21);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__LOAD(__reg_0_1, 22);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__LOAD(__reg_0_2, 23);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__LOAD(__reg_0_0, 24);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__LOAD(__reg_0_1, 25);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__LOAD(__reg_0_2, 26);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__LOAD(__reg_0_0, 27);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__LOAD(__reg_0_1, 28);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__LOAD(__reg_0_2, 29);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__LOAD(__reg_0_0, 30);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1);
__LOAD(__reg_0_1, 31);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2);
__LOAD(__reg_0_2, 32);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0);
__STORE(16, __reg_15_0, __reg_15_1, __reg_15_2);
}
__b_sb = __b_sb_double + __blockSize * 0;
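// Steady state: from here on, every newly loaded input row pushes one
// finished output row (__h - 16) out of the end of the pipeline.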
if (__c1Id == __side1Num - 1)
{
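// Last tile along c1: sweep to within 3 rows of the tile end; the loop is
// unrolled by 3 to keep the register rotation static.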
for (__h = 33; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1);
__STORE(__h - 16, __reg_15_1, __reg_15_2, __reg_15_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2);
__STORE(__h - 16, __reg_15_2, __reg_15_0, __reg_15_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0);
__STORE(__h - 16, __reg_15_0, __reg_15_1, __reg_15_2);
__h++;
}
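// Pipeline drain: the tile ends 0, 1, or 2 rows past __h. The last loaded
// row is on the bottom boundary and constant across time steps, so it is
// re-fed as the "row below" input at each remaining stage while the
// in-flight output rows are flushed.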
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1);
__STORE(__h - 16, __reg_15_1, __reg_15_2, __reg_15_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2);
__STORE(__h - 15, __reg_15_2, __reg_15_0, __reg_15_1);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0);
__STORE(__h - 14, __reg_15_0, __reg_15_1, __reg_15_2);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1);
__STORE(__h - 13, __reg_15_1, __reg_15_2, __reg_15_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2);
__STORE(__h - 12, __reg_15_2, __reg_15_0, __reg_15_1);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0);
__STORE(__h - 11, __reg_15_0, __reg_15_1, __reg_15_2);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1);
__STORE(__h - 10, __reg_15_1, __reg_15_2, __reg_15_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2);
__STORE(__h - 9, __reg_15_2, __reg_15_0, __reg_15_1);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_0_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0);
__STORE(__h - 8, __reg_15_0, __reg_15_1, __reg_15_2);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_0_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1);
__STORE(__h - 7, __reg_15_1, __reg_15_2, __reg_15_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_0_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2);
__STORE(__h - 6, __reg_15_2, __reg_15_0, __reg_15_1);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_0_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0);
__STORE(__h - 5, __reg_15_0, __reg_15_1, __reg_15_2);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_0_2);
__CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1);
__STORE(__h - 4, __reg_15_1, __reg_15_2, __reg_15_0);
__CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_0_2);
__STORE(__h - 3, __reg_15_2, __reg_15_0, __reg_15_1);
__STORE(__h - 2, __reg_15_0, __reg_15_1, __reg_0_2);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1);
__STORE(__h - 16, __reg_15_1, __reg_15_2, __reg_15_0);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2);
__STORE(__h - 15, __reg_15_2, __reg_15_0, __reg_15_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0);
__STORE(__h - 14, __reg_15_0, __reg_15_1, __reg_15_2);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1);
__STORE(__h - 13, __reg_15_1, __reg_15_2, __reg_15_0);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2);
__STORE(__h - 12, __reg_15_2, __reg_15_0, __reg_15_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0);
__STORE(__h - 11, __reg_15_0, __reg_15_1, __reg_15_2);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1);
__STORE(__h - 10, __reg_15_1, __reg_15_2, __reg_15_0);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2);
__STORE(__h - 9, __reg_15_2, __reg_15_0, __reg_15_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0);
__STORE(__h - 8, __reg_15_0, __reg_15_1, __reg_15_2);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_0_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1);
__STORE(__h - 7, __reg_15_1, __reg_15_2, __reg_15_0);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_0_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2);
__STORE(__h - 6, __reg_15_2, __reg_15_0, __reg_15_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_0_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0);
__STORE(__h - 5, __reg_15_0, __reg_15_1, __reg_15_2);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_0_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1);
__STORE(__h - 4, __reg_15_1, __reg_15_2, __reg_15_0);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_0_0);
__CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2);
__STORE(__h - 3, __reg_15_2, __reg_15_0, __reg_15_1);
__CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_0_0);
__STORE(__h - 2, __reg_15_0, __reg_15_1, __reg_15_2);
__STORE(__h - 1, __reg_15_1, __reg_15_2, __reg_0_0);
}
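// Drain variant: two rows remain below __h. Load both, then flush the
// pipeline; the second loaded row (the boundary) closes out the final stores.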
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1);
__STORE(__h - 16, __reg_15_1, __reg_15_2, __reg_15_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2);
__STORE(__h - 15, __reg_15_2, __reg_15_0, __reg_15_1);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0);
__STORE(__h - 14, __reg_15_0, __reg_15_1, __reg_15_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1);
__STORE(__h - 13, __reg_15_1, __reg_15_2, __reg_15_0);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2);
__STORE(__h - 12, __reg_15_2, __reg_15_0, __reg_15_1);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0);
__STORE(__h - 11, __reg_15_0, __reg_15_1, __reg_15_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1);
__STORE(__h - 10, __reg_15_1, __reg_15_2, __reg_15_0);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2);
__STORE(__h - 9, __reg_15_2, __reg_15_0, __reg_15_1);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0);
__STORE(__h - 8, __reg_15_0, __reg_15_1, __reg_15_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1);
__STORE(__h - 7, __reg_15_1, __reg_15_2, __reg_15_0);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_0_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2);
__STORE(__h - 6, __reg_15_2, __reg_15_0, __reg_15_1);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_0_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0);
__STORE(__h - 5, __reg_15_0, __reg_15_1, __reg_15_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_0_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1);
__STORE(__h - 4, __reg_15_1, __reg_15_2, __reg_15_0);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_0_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2);
__STORE(__h - 3, __reg_15_2, __reg_15_0, __reg_15_1);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_0_1);
__CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0);
__STORE(__h - 2, __reg_15_0, __reg_15_1, __reg_15_2);
__CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_0_1);
__STORE(__h - 1, __reg_15_1, __reg_15_2, __reg_15_0);
__STORE(__h + 0, __reg_15_2, __reg_15_0, __reg_0_1);
}
}
else
{
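// Steady state for interior blocks: each trip loads three new rows and
// retires three fully updated rows (at __h - 16), matching the period-3
// rotation of the __reg_*_0/1/2 register sets.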
for (__h = 33; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1);
__STORE(__h - 16, __reg_15_1, __reg_15_2, __reg_15_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2);
__STORE(__h - 16, __reg_15_2, __reg_15_0, __reg_15_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0);
__STORE(__h - 16, __reg_15_0, __reg_15_1, __reg_15_2);
__h++;
}
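// Remainder: when the overlapped extent is not a multiple of the rotation
// period, finish the last zero to two rows one at a time.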
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1);
__STORE(__h - 16, __reg_15_1, __reg_15_2, __reg_15_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2);
__STORE(__h - 16, __reg_15_2, __reg_15_0, __reg_15_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0);
__STORE(__h - 16, __reg_15_0, __reg_15_1, __reg_15_2);
__h++;
}
}
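// kernel0_15: AN5D-style temporally blocked stencil fusing 15 time steps
// per launch (__side0Len = 15). Each thread streams rows along c1 through
// a register pipeline of 14 __CALC stages plus a final __STORE, so every
// stored row has received all 15 updates.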
__global__ void kernel0_15(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
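// Tile geometry: 15 fused steps over a 128 x 482 spatial tile; the
// overlapped extents add one halo row/column per fused step on each side
// (158 x 512 including overlap).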
const AN5D_TYPE __side0Len = 15;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 482;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
float __reg_7_0;
float __reg_7_1;
float __reg_7_2;
float __reg_8_0;
float __reg_8_1;
float __reg_8_2;
float __reg_9_0;
float __reg_9_1;
float __reg_9_2;
float __reg_10_0;
float __reg_10_1;
float __reg_10_2;
float __reg_11_0;
float __reg_11_1;
float __reg_11_2;
float __reg_12_0;
float __reg_12_1;
float __reg_12_2;
float __reg_13_0;
float __reg_13_1;
float __reg_13_2;
float __reg_14_0;
float __reg_14_1;
float __reg_14_2;
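// Double-buffered shared-memory row: stages the c2 neighborhood of the row
// being updated; __DB_SWITCH() flips buffers before each pipeline stage.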
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
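// Validity masks: each pipeline stage shrinks the usable c2 range by one
// halo column; lanes outside a stage's range forward their input unchanged
// (out = reg1) so the pipeline stays in lockstep.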
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10);
const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11);
const AN5D_TYPE __writeValid12 = __updateValid && __local_c2 >= (__halo2 * 12) && __local_c2 < __side2LenOl - (__halo2 * 12);
const AN5D_TYPE __writeValid13 = __updateValid && __local_c2 >= (__halo2 * 13) && __local_c2 < __side2LenOl - (__halo2 * 13);
const AN5D_TYPE __writeValid14 = __updateValid && __local_c2 >= (__halo2 * 14) && __local_c2 < __side2LenOl - (__halo2 * 14);
const AN5D_TYPE __writeValid15 = __updateValid && __local_c2 >= (__halo2 * 15) && __local_c2 < __side2LenOl - (__halo2 * 15);
const AN5D_TYPE __storeValid = __writeValid15;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
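// __LOAD reads from the (__c0 % 2) time plane of A; __STORE writes the
// fully updated row into the opposite plane (__DEST). __CALCEXPR is the
// 5-point update over the previous/current/next rows along c1 (register
// operands) and the left/right neighbors along c2 (shared memory),
// normalized by 118.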
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC10(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid10) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC11(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid11) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC12(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid12) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC13(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid13) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC14(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid14) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
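// First block along c1: row 0 is the top boundary and is never rewritten,
// so its raw value (__reg_14_0) serves as the upper neighbor at every
// warm-up stage.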
if (__c1Id == 0)
{
__LOAD(__reg_14_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_14_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_14_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_14_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_14_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_14_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_14_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_14_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_14_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_14_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_14_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_14_0, __reg_10_1, __reg_10_2);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_14_0, __reg_11_1, __reg_11_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_14_0, __reg_12_1, __reg_12_2);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_14_0, __reg_13_1, __reg_13_2);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__STORE(1, __reg_14_0, __reg_14_1, __reg_14_2);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__STORE(2, __reg_14_1, __reg_14_2, __reg_14_0);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__STORE(3, __reg_14_2, __reg_14_0, __reg_14_1);
__LOAD(__reg_0_1, 19);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__STORE(4, __reg_14_0, __reg_14_1, __reg_14_2);
__LOAD(__reg_0_2, 20);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__STORE(5, __reg_14_1, __reg_14_2, __reg_14_0);
__LOAD(__reg_0_0, 21);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__STORE(6, __reg_14_2, __reg_14_0, __reg_14_1);
__LOAD(__reg_0_1, 22);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__STORE(7, __reg_14_0, __reg_14_1, __reg_14_2);
__LOAD(__reg_0_2, 23);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__STORE(8, __reg_14_1, __reg_14_2, __reg_14_0);
__LOAD(__reg_0_0, 24);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__STORE(9, __reg_14_2, __reg_14_0, __reg_14_1);
__LOAD(__reg_0_1, 25);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__STORE(10, __reg_14_0, __reg_14_1, __reg_14_2);
__LOAD(__reg_0_2, 26);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__STORE(11, __reg_14_1, __reg_14_2, __reg_14_0);
__LOAD(__reg_0_0, 27);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__STORE(12, __reg_14_2, __reg_14_0, __reg_14_1);
__LOAD(__reg_0_1, 28);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__STORE(13, __reg_14_0, __reg_14_1, __reg_14_2);
__LOAD(__reg_0_2, 29);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__STORE(14, __reg_14_1, __reg_14_2, __reg_14_0);
__LOAD(__reg_0_0, 30);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__STORE(15, __reg_14_2, __reg_14_0, __reg_14_1);
}
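// Interior blocks: warm up the pipeline from the overlapped halo rows with
// no boundary special-casing; the warm-up ends with a single store at
// row 15 of the overlapped tile.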
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_1, 19);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 20);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_0, 21);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__LOAD(__reg_0_1, 22);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__LOAD(__reg_0_2, 23);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__LOAD(__reg_0_0, 24);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__LOAD(__reg_0_1, 25);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__LOAD(__reg_0_2, 26);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__LOAD(__reg_0_0, 27);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__LOAD(__reg_0_1, 28);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__LOAD(__reg_0_2, 29);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__LOAD(__reg_0_0, 30);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__STORE(15, __reg_14_2, __reg_14_0, __reg_14_1);
__DB_SWITCH(); __syncthreads();
}
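// End of the pipeline warm-up for this variant (14 __CALC stages plus the
// final __STORE, i.e. 15 fused time steps). Reset the shared-memory pointer
// to buffer 0 before entering the steady-state / epilogue phase below.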
__b_sb = __b_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 31; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__STORE(__h - 15, __reg_14_0, __reg_14_1, __reg_14_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__STORE(__h - 15, __reg_14_1, __reg_14_2, __reg_14_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__STORE(__h - 15, __reg_14_2, __reg_14_0, __reg_14_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
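// Epilogue for the last tile along c1: dispatch on how many input rows
// remain (__h + 0/1/2 == tile end) and drain the register pipeline, reusing
// the most recently loaded row as the fixed upper-boundary operand of each
// remaining stage.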
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__STORE(__h - 15, __reg_14_0, __reg_14_1, __reg_14_2);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__STORE(__h - 14, __reg_14_1, __reg_14_2, __reg_14_0);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__STORE(__h - 13, __reg_14_2, __reg_14_0, __reg_14_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__STORE(__h - 12, __reg_14_0, __reg_14_1, __reg_14_2);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__STORE(__h - 11, __reg_14_1, __reg_14_2, __reg_14_0);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__STORE(__h - 10, __reg_14_2, __reg_14_0, __reg_14_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__STORE(__h - 9, __reg_14_0, __reg_14_1, __reg_14_2);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__STORE(__h - 8, __reg_14_1, __reg_14_2, __reg_14_0);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_0_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__STORE(__h - 7, __reg_14_2, __reg_14_0, __reg_14_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_0_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__STORE(__h - 6, __reg_14_0, __reg_14_1, __reg_14_2);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_0_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__STORE(__h - 5, __reg_14_1, __reg_14_2, __reg_14_0);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_0_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__STORE(__h - 4, __reg_14_2, __reg_14_0, __reg_14_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_0_0);
__STORE(__h - 3, __reg_14_0, __reg_14_1, __reg_14_2);
__STORE(__h - 2, __reg_14_1, __reg_14_2, __reg_0_0);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__STORE(__h - 15, __reg_14_0, __reg_14_1, __reg_14_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__STORE(__h - 14, __reg_14_1, __reg_14_2, __reg_14_0);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__STORE(__h - 13, __reg_14_2, __reg_14_0, __reg_14_1);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__STORE(__h - 12, __reg_14_0, __reg_14_1, __reg_14_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__STORE(__h - 11, __reg_14_1, __reg_14_2, __reg_14_0);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__STORE(__h - 10, __reg_14_2, __reg_14_0, __reg_14_1);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__STORE(__h - 9, __reg_14_0, __reg_14_1, __reg_14_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__STORE(__h - 8, __reg_14_1, __reg_14_2, __reg_14_0);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__STORE(__h - 7, __reg_14_2, __reg_14_0, __reg_14_1);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_0_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__STORE(__h - 6, __reg_14_0, __reg_14_1, __reg_14_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_0_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__STORE(__h - 5, __reg_14_1, __reg_14_2, __reg_14_0);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_0_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__STORE(__h - 4, __reg_14_2, __reg_14_0, __reg_14_1);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_0_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__STORE(__h - 3, __reg_14_0, __reg_14_1, __reg_14_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_0_1);
__STORE(__h - 2, __reg_14_1, __reg_14_2, __reg_14_0);
__STORE(__h - 1, __reg_14_2, __reg_14_0, __reg_0_1);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__STORE(__h - 15, __reg_14_0, __reg_14_1, __reg_14_2);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__STORE(__h - 14, __reg_14_1, __reg_14_2, __reg_14_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__STORE(__h - 13, __reg_14_2, __reg_14_0, __reg_14_1);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__STORE(__h - 12, __reg_14_0, __reg_14_1, __reg_14_2);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__STORE(__h - 11, __reg_14_1, __reg_14_2, __reg_14_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__STORE(__h - 10, __reg_14_2, __reg_14_0, __reg_14_1);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__STORE(__h - 9, __reg_14_0, __reg_14_1, __reg_14_2);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__STORE(__h - 8, __reg_14_1, __reg_14_2, __reg_14_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__STORE(__h - 7, __reg_14_2, __reg_14_0, __reg_14_1);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__STORE(__h - 6, __reg_14_0, __reg_14_1, __reg_14_2);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_0_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__STORE(__h - 5, __reg_14_1, __reg_14_2, __reg_14_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_0_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__STORE(__h - 4, __reg_14_2, __reg_14_0, __reg_14_1);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_0_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__STORE(__h - 3, __reg_14_0, __reg_14_1, __reg_14_2);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_0_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__STORE(__h - 2, __reg_14_1, __reg_14_2, __reg_14_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_0_2);
__STORE(__h - 1, __reg_14_2, __reg_14_0, __reg_14_1);
__STORE(__h + 0, __reg_14_0, __reg_14_1, __reg_0_2);
}
}
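// Interior tiles: plain steady-state streaming over the overlapped extent.
// No boundary drain is needed here, since rows near the tile edge appear to
// be recomputed redundantly by the neighbouring tile's overlap region.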
else
{
for (__h = 31; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__STORE(__h - 15, __reg_14_0, __reg_14_1, __reg_14_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__STORE(__h - 15, __reg_14_1, __reg_14_2, __reg_14_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__STORE(__h - 15, __reg_14_2, __reg_14_0, __reg_14_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
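// At most three rows of the 3x-unrolled loop remain; process them one at a
// time, returning as soon as the overlapped tile extent is exhausted.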
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0);
__STORE(__h - 15, __reg_14_0, __reg_14_1, __reg_14_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1);
__STORE(__h - 15, __reg_14_1, __reg_14_2, __reg_14_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2);
__STORE(__h - 15, __reg_14_2, __reg_14_0, __reg_14_1);
__h++;
}
}
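// kernel0_14: same 2D 5-point stencil, with 14 time steps fused per kernel
// launch (degree-14 variant; cf. the 15-stage kernel above). Each thread
// streams one column (fixed __c2) of the grid through a 14-stage register
// pipeline, advancing along c1 via __h; tiles overlap by
// __side0Len = 14 halo rows/columns on each side.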
__global__ void kernel0_14(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 14;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 484;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
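    // Three registers per pipeline stage hold that stage's sliding 3-row
    // window (rows c1-1, c1, c1+1 of the stage's intermediate time step),
    // rotated as new rows are loaded.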
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
float __reg_7_0;
float __reg_7_1;
float __reg_7_2;
float __reg_8_0;
float __reg_8_1;
float __reg_8_2;
float __reg_9_0;
float __reg_9_1;
float __reg_9_2;
float __reg_10_0;
float __reg_10_1;
float __reg_10_2;
float __reg_11_0;
float __reg_11_1;
float __reg_11_2;
float __reg_12_0;
float __reg_12_1;
float __reg_12_2;
float __reg_13_0;
float __reg_13_1;
float __reg_13_2;
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
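    // Double-buffered shared-memory row: each __CALC stage publishes its
    // middle operand to __b_sb so neighbouring threads can read the
    // east/west (c2 +/- 1) values; __DB_SWITCH flips between the two halves
    // so a single __syncthreads per stage suffices.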
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10);
const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11);
const AN5D_TYPE __writeValid12 = __updateValid && __local_c2 >= (__halo2 * 12) && __local_c2 < __side2LenOl - (__halo2 * 12);
const AN5D_TYPE __writeValid13 = __updateValid && __local_c2 >= (__halo2 * 13) && __local_c2 < __side2LenOl - (__halo2 * 13);
const AN5D_TYPE __writeValid14 = __updateValid && __local_c2 >= (__halo2 * 14) && __local_c2 < __side2LenOl - (__halo2 * 14);
const AN5D_TYPE __storeValid = __writeValid14;
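    // Each fused stage shrinks the valid interior by one halo column:
    // __writeValidK gates stage K, and only threads still valid after all
    // 14 stages (__storeValid) write results back to global memory.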
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC10(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid10) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC11(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid11) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC12(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid12) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC13(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid13) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
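    // __CALCK computes intermediate time step K for the thread's current row
    // window (or forwards the unmodified centre value where the thread sits
    // in that stage's halo); __STORE evaluates the 14th and final step and
    // writes it straight to the destination plane of A, which is
    // double-buffered in the time dimension via (__c0 % 2).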
if (__c1Id == 0)
{
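      // Top tile: row 0 is the fixed boundary. Its raw value is kept in
      // __reg_13_0 and fed unchanged as the lower neighbour of every stage
      // while the pipeline fills.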
__LOAD(__reg_13_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_13_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_13_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_13_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_13_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_13_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_13_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_13_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_13_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_13_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_13_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_13_0, __reg_10_1, __reg_10_2);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_13_0, __reg_11_1, __reg_11_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_13_0, __reg_12_1, __reg_12_2);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__STORE(1, __reg_13_0, __reg_13_1, __reg_13_2);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__STORE(2, __reg_13_1, __reg_13_2, __reg_13_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__STORE(3, __reg_13_2, __reg_13_0, __reg_13_1);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__STORE(4, __reg_13_0, __reg_13_1, __reg_13_2);
__LOAD(__reg_0_1, 19);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__STORE(5, __reg_13_1, __reg_13_2, __reg_13_0);
__LOAD(__reg_0_2, 20);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__STORE(6, __reg_13_2, __reg_13_0, __reg_13_1);
__LOAD(__reg_0_0, 21);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__STORE(7, __reg_13_0, __reg_13_1, __reg_13_2);
__LOAD(__reg_0_1, 22);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__STORE(8, __reg_13_1, __reg_13_2, __reg_13_0);
__LOAD(__reg_0_2, 23);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__STORE(9, __reg_13_2, __reg_13_0, __reg_13_1);
__LOAD(__reg_0_0, 24);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__STORE(10, __reg_13_0, __reg_13_1, __reg_13_2);
__LOAD(__reg_0_1, 25);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__STORE(11, __reg_13_1, __reg_13_2, __reg_13_0);
__LOAD(__reg_0_2, 26);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__STORE(12, __reg_13_2, __reg_13_0, __reg_13_1);
__LOAD(__reg_0_0, 27);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__STORE(13, __reg_13_0, __reg_13_1, __reg_13_2);
__LOAD(__reg_0_1, 28);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__STORE(14, __reg_13_1, __reg_13_2, __reg_13_0);
}
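  // Non-top tiles: ramp the pipeline on rows streamed from the tile's lower
  // overlap region; nothing is stored until all 14 stages hold valid
  // intermediate values.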
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_1, 19);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 20);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_0, 21);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__LOAD(__reg_0_1, 22);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__LOAD(__reg_0_2, 23);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__LOAD(__reg_0_0, 24);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__LOAD(__reg_0_1, 25);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__LOAD(__reg_0_2, 26);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__LOAD(__reg_0_0, 27);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__LOAD(__reg_0_1, 28);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__STORE(14, __reg_13_1, __reg_13_2, __reg_13_0);
__DB_SWITCH(); __syncthreads();
}
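// Re-anchor the shared-row pointer on the second buffer half so both
// prologue paths above enter the steady-state sweep in the same
// double-buffer phase.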
__b_sb = __b_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
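// Last tile along c1: stream rows three at a time (one full rotation of the
// triple-register pipeline); stores lag loads by the 14 fused time steps,
// and the epilogue below drains the pipeline against the bottom boundary.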
for (__h = 29; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__STORE(__h - 14, __reg_13_2, __reg_13_0, __reg_13_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__STORE(__h - 14, __reg_13_0, __reg_13_1, __reg_13_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__STORE(__h - 14, __reg_13_1, __reg_13_2, __reg_13_0);
__h++;
}
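// Generated epilogue dispatch: exactly one branch below matches how many
// rows remain (__h + 0, 1, or 2 reaching the tile end) and flushes the
// outstanding pipeline stages, reusing the last loaded row as the missing
// lower neighbor.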
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__STORE(__h - 14, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__STORE(__h - 13, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__STORE(__h - 12, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__STORE(__h - 11, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__STORE(__h - 10, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__STORE(__h - 9, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__STORE(__h - 8, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__STORE(__h - 7, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_0_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__STORE(__h - 6, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_0_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__STORE(__h - 5, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_0_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__STORE(__h - 4, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_0_1);
__STORE(__h - 3, __reg_13_1, __reg_13_2, __reg_13_0);
__STORE(__h - 2, __reg_13_2, __reg_13_0, __reg_0_1);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__STORE(__h - 14, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__STORE(__h - 13, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__STORE(__h - 12, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__STORE(__h - 11, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__STORE(__h - 10, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__STORE(__h - 9, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__STORE(__h - 8, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__STORE(__h - 7, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__STORE(__h - 6, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_0_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__STORE(__h - 5, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_0_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__STORE(__h - 4, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_0_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__STORE(__h - 3, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_0_2);
__STORE(__h - 2, __reg_13_2, __reg_13_0, __reg_13_1);
__STORE(__h - 1, __reg_13_0, __reg_13_1, __reg_0_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__STORE(__h - 14, __reg_13_2, __reg_13_0, __reg_13_1);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__STORE(__h - 13, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__STORE(__h - 12, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__STORE(__h - 11, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__STORE(__h - 10, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__STORE(__h - 9, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__STORE(__h - 8, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__STORE(__h - 7, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__STORE(__h - 6, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__STORE(__h - 5, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_0_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__STORE(__h - 4, __reg_13_0, __reg_13_1, __reg_13_2);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_0_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__STORE(__h - 3, __reg_13_1, __reg_13_2, __reg_13_0);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_0_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__STORE(__h - 2, __reg_13_2, __reg_13_0, __reg_13_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_0_0);
__STORE(__h - 1, __reg_13_0, __reg_13_1, __reg_13_2);
__STORE(__h + 0, __reg_13_1, __reg_13_2, __reg_0_0);
}
}
else
{
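// Interior tile: plain sweep over the overlapped height; once __h reaches
// __side1LenOl there is nothing left to drain, so the kernel just returns.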
for (__h = 29; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__STORE(__h - 14, __reg_13_2, __reg_13_0, __reg_13_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__STORE(__h - 14, __reg_13_0, __reg_13_1, __reg_13_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__STORE(__h - 14, __reg_13_1, __reg_13_2, __reg_13_0);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2);
__STORE(__h - 14, __reg_13_2, __reg_13_0, __reg_13_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0);
__STORE(__h - 14, __reg_13_0, __reg_13_1, __reg_13_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1);
__STORE(__h - 14, __reg_13_1, __reg_13_2, __reg_13_0);
__h++;
}
}
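// kernel0_13: temporally blocked 2D stencil kernel with 13 fused time steps
// per launch (12 in-register __CALC stages plus the final __STORE).
// Tile: __side1Len = 128 rows x __side2Len = 486 columns, widened by a
// 13-cell halo per side, giving __blockSize = 512 threads; the time
// dimension of A is double-buffered via the (__c0 % 2) plane index.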
__global__ void kernel0_13(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 13;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 486;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
float __reg_7_0;
float __reg_7_1;
float __reg_7_2;
float __reg_8_0;
float __reg_8_1;
float __reg_8_2;
float __reg_9_0;
float __reg_9_1;
float __reg_9_2;
float __reg_10_0;
float __reg_10_1;
float __reg_10_2;
float __reg_11_0;
float __reg_11_1;
float __reg_11_2;
float __reg_12_0;
float __reg_12_1;
float __reg_12_2;
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
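// Double-buffered shared row: each __CALCSETUP flips to the other half and
// publishes that stage's center row, so neighbor reads via __SBREF never
// race with the staging writes of the next stage.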
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10);
const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11);
const AN5D_TYPE __writeValid12 = __updateValid && __local_c2 >= (__halo2 * 12) && __local_c2 < __side2LenOl - (__halo2 * 12);
const AN5D_TYPE __writeValid13 = __updateValid && __local_c2 >= (__halo2 * 13) && __local_c2 < __side2LenOl - (__halo2 * 13);
const AN5D_TYPE __storeValid = __writeValid13;
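// Every fused step costs one halo cell on each side of the tile, so the
// writable column range shrinks stage by stage; only threads still valid
// after all 13 stages (__writeValid13) commit results.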
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC10(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid10) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC11(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid11) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC12(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid12) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
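// Each __CALCk applies one weighted 5-point update (see __CALCEXPR:
// 5.1*up + 12.1*left + 15.0*center + 12.2*right + 5.2*down, all over 118)
// where stage k is valid, and otherwise forwards the center value unchanged;
// __STORE performs the 13th and final update directly into __DEST.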
if (__c1Id == 0)
{
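// Top tile (__c1Id == 0): warm the pipeline row by row; __reg_12_0 keeps
// boundary row 0 live so each successive stage can reuse it as its upper
// neighbor while the lower stages fill.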
__LOAD(__reg_12_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_12_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_12_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_12_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_12_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_12_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_12_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_12_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_12_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_12_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_12_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_12_0, __reg_10_1, __reg_10_2);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_12_0, __reg_11_1, __reg_11_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(1, __reg_12_0, __reg_12_1, __reg_12_2);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(2, __reg_12_1, __reg_12_2, __reg_12_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(3, __reg_12_2, __reg_12_0, __reg_12_1);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(4, __reg_12_0, __reg_12_1, __reg_12_2);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(5, __reg_12_1, __reg_12_2, __reg_12_0);
__LOAD(__reg_0_1, 19);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(6, __reg_12_2, __reg_12_0, __reg_12_1);
__LOAD(__reg_0_2, 20);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(7, __reg_12_0, __reg_12_1, __reg_12_2);
__LOAD(__reg_0_0, 21);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(8, __reg_12_1, __reg_12_2, __reg_12_0);
__LOAD(__reg_0_1, 22);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(9, __reg_12_2, __reg_12_0, __reg_12_1);
__LOAD(__reg_0_2, 23);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(10, __reg_12_0, __reg_12_1, __reg_12_2);
__LOAD(__reg_0_0, 24);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(11, __reg_12_1, __reg_12_2, __reg_12_0);
__LOAD(__reg_0_1, 25);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(12, __reg_12_2, __reg_12_0, __reg_12_1);
__LOAD(__reg_0_2, 26);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(13, __reg_12_0, __reg_12_1, __reg_12_2);
}
else
{
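// Non-top tile: no boundary row to pin, so the warm-up below simply streams
// the overlap rows through the pipeline until every stage holds valid data.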
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_1, 19);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 20);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_0, 21);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__LOAD(__reg_0_1, 22);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__LOAD(__reg_0_2, 23);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__LOAD(__reg_0_0, 24);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__LOAD(__reg_0_1, 25);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__LOAD(__reg_0_2, 26);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(13, __reg_12_0, __reg_12_1, __reg_12_2);
}
__b_sb = __b_sb_double + __blockSize * 1;
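// Advance __b_sb to the second shared-memory buffer; presumably this
// realigns the pointer with the parity of __DB_SWITCH flips performed
// during priming (each __CALCk/__STORE flips once via __CALCSETUP).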
if (__c1Id == __side1Num - 1)
{
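// Last tile along c1: the steady-state loop consumes three rows per trip
// (one per phase of the 3-register rotation); every output trails its
// input row by 13, and one extra buffer flip plus a barrier closes each trip.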
for (__h = 27; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 13, __reg_12_2, __reg_12_0, __reg_12_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 13, __reg_12_0, __reg_12_1, __reg_12_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
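// Drain: zero, one, or two rows remain past __h. The last value loaded
// into a __reg_0_* register is reused as the bottom halo while the 13
// in-flight stages flush their pending outputs one row at a time.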
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 12, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 11, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 10, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 9, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 8, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 7, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 6, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_0_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 5, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_0_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 4, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_0_2);
__STORE(__h - 3, __reg_12_2, __reg_12_0, __reg_12_1);
__STORE(__h - 2, __reg_12_0, __reg_12_1, __reg_0_2);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 12, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 11, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 10, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 9, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 8, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 7, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 6, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 5, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_0_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 4, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_0_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 3, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_0_0);
__STORE(__h - 2, __reg_12_0, __reg_12_1, __reg_12_2);
__STORE(__h - 1, __reg_12_1, __reg_12_2, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 12, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 11, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 10, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 9, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 8, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 7, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 6, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 5, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 4, __reg_12_1, __reg_12_2, __reg_12_0);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_0_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 3, __reg_12_2, __reg_12_0, __reg_12_1);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_0_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 2, __reg_12_0, __reg_12_1, __reg_12_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_0_1);
__STORE(__h - 1, __reg_12_1, __reg_12_2, __reg_12_0);
__STORE(__h + 0, __reg_12_2, __reg_12_0, __reg_0_1);
}
}
else
{
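// Interior tiles: the same steady-state loop, bounded by the overlapped
// tile height __side1LenOl; a short tail then handles any leftover rows,
// returning as soon as __h reaches the tile end.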
for (__h = 27; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 13, __reg_12_2, __reg_12_0, __reg_12_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 13, __reg_12_0, __reg_12_1, __reg_12_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 13, __reg_12_2, __reg_12_0, __reg_12_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 13, __reg_12_0, __reg_12_1, __reg_12_2);
__h++;
}
}
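// kernel0_12: the same 5-point stencil with a temporal fusion degree of 12
// (11 register-pipelined __CALC stages plus the fused __STORE). Tile sizes:
// __side1Len = 128 rows, __side2Len = 488 columns, overlapped by 12 halo
// rows/columns per side. A plausible host-side launch (names assumed, not
// taken from this file) would be:
//
//   dim3 block(512);                       // __side2LenOl = 488 + 2*12
//   dim3 grid(__side1Num * __side2Num);    // one block per (c1, c2) tile
//   kernel0_12<<<grid, block>>>(dA, dimsize, timestep, c0);
//
// This is a sketch under stated assumptions; the dispatcher that picks
// among the kernel0_* fusion degrees is defined elsewhere.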
__global__ void kernel0_12(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 12;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
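// Each thread owns a single column __c2 of the overlapped tile and streams
// down its rows; blockIdx.x encodes both the row-tile id (__c1Id) and the
// column-tile id (blockIdx.x % __side2Num).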
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
float __reg_7_0;
float __reg_7_1;
float __reg_7_2;
float __reg_8_0;
float __reg_8_1;
float __reg_8_2;
float __reg_9_0;
float __reg_9_1;
float __reg_9_2;
float __reg_10_0;
float __reg_10_1;
float __reg_10_2;
float __reg_11_0;
float __reg_11_1;
float __reg_11_2;
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
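// Two block-wide row buffers: every __CALCk/__STORE publishes its center
// operand to shared memory for the left/right neighbors, flipping buffers
// via __DB_SWITCH so a stage never clobbers a row still being read.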
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10);
const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11);
const AN5D_TYPE __writeValid12 = __updateValid && __local_c2 >= (__halo2 * 12) && __local_c2 < __side2LenOl - (__halo2 * 12);
const AN5D_TYPE __storeValid = __writeValid12;
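// Each fused stage shrinks the valid column range by one halo: stage k
// computes only where __writeValidk holds (otherwise it passes its center
// value through), and only threads still valid after all 12 stages store.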
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
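// Macro glossary: __LOAD reads row h of the current time-parity plane of A;
// __CALCEXPR is the weighted 5-point stencil (top, left, center, right,
// bottom, normalized by 118); __CALCSETUP flips the shared row buffer,
// publishes the center value, and barriers; __CALCk applies the stencil
// where valid and forwards the center value elsewhere; __STORE writes the
// final stage's result into the opposite time-parity plane.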
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC10(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid10) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC11(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid11) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
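// Top tile: row 0 is cached in __reg_11_0 and fed to each stage as the
// upper halo when that stage emits its first output row.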
__LOAD(__reg_11_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_11_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_11_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_11_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_11_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_11_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_11_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_11_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_11_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_11_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_11_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_11_0, __reg_10_1, __reg_10_2);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(1, __reg_11_0, __reg_11_1, __reg_11_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(2, __reg_11_1, __reg_11_2, __reg_11_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(3, __reg_11_2, __reg_11_0, __reg_11_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(4, __reg_11_0, __reg_11_1, __reg_11_2);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(5, __reg_11_1, __reg_11_2, __reg_11_0);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(6, __reg_11_2, __reg_11_0, __reg_11_1);
__LOAD(__reg_0_1, 19);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(7, __reg_11_0, __reg_11_1, __reg_11_2);
__LOAD(__reg_0_2, 20);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(8, __reg_11_1, __reg_11_2, __reg_11_0);
__LOAD(__reg_0_0, 21);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(9, __reg_11_2, __reg_11_0, __reg_11_1);
__LOAD(__reg_0_1, 22);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(10, __reg_11_0, __reg_11_1, __reg_11_2);
__LOAD(__reg_0_2, 23);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(11, __reg_11_1, __reg_11_2, __reg_11_0);
__LOAD(__reg_0_0, 24);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(12, __reg_11_2, __reg_11_0, __reg_11_1);
}
else
{
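// Non-top tiles: plain priming over rows 0..24; the halo rows loaded from
// the neighboring tile's range make boundary clamping unnecessary here.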
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_1, 19);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 20);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_0, 21);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__LOAD(__reg_0_1, 22);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__LOAD(__reg_0_2, 23);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__LOAD(__reg_0_0, 24);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(12, __reg_11_2, __reg_11_0, __reg_11_1);
}
__b_sb = __b_sb_double + __blockSize * 0;
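// Back to the first shared buffer before streaming; the flip parity after
// priming apparently differs from the 13-stage kernel above, which resumes
// on buffer 1.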
if (__c1Id == __side1Num - 1)
{
for (__h = 25; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 12, __reg_11_1, __reg_11_2, __reg_11_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(__h - 12, __reg_11_2, __reg_11_0, __reg_11_1);
__h++;
}
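// Bottom-boundary epilogue: depending on how many rows remain (0, 1 or 2),
// the last row loaded into __reg_0_* is replayed as the clamped "below"
// input while the register pipeline drains its in-flight stages.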
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 11, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(__h - 10, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 9, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 8, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(__h - 7, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 6, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 5, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_0_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(__h - 4, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_0_0);
__STORE(__h - 3, __reg_11_0, __reg_11_1, __reg_11_2);
__STORE(__h - 2, __reg_11_1, __reg_11_2, __reg_0_0);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 11, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(__h - 10, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 9, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 8, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(__h - 7, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 6, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 5, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(__h - 4, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_0_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 3, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_0_1);
__STORE(__h - 2, __reg_11_1, __reg_11_2, __reg_11_0);
__STORE(__h - 1, __reg_11_2, __reg_11_0, __reg_0_1);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 11, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(__h - 10, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 9, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 8, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(__h - 7, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 6, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 5, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(__h - 4, __reg_11_2, __reg_11_0, __reg_11_1);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 3, __reg_11_0, __reg_11_1, __reg_11_2);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_0_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 2, __reg_11_1, __reg_11_2, __reg_11_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_0_2);
__STORE(__h - 1, __reg_11_2, __reg_11_0, __reg_11_1);
__STORE(__h + 0, __reg_11_0, __reg_11_1, __reg_0_2);
}
}
else
{
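// Blocks that do not touch the bottom boundary: no clamping, just run the
// pipeline across the overlapped extent and drain with early returns.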
for (__h = 25; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 12, __reg_11_1, __reg_11_2, __reg_11_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(__h - 12, __reg_11_2, __reg_11_0, __reg_11_1);
__h++;
}
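// Pipeline drain: up to three trailing rows may remain before
// __side1LenOl; each step returns as soon as the extent is consumed.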
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 12, __reg_11_1, __reg_11_2, __reg_11_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(__h - 12, __reg_11_2, __reg_11_0, __reg_11_1);
__h++;
}
}
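// kernel0_11 -- one kernel of the generated family (note the AN5D_TYPE guard
// below). It effectively fuses 11 applications of the weighted 5-point
// stencil per launch -- ten pipelined __CALC stages plus a final one inside
// __STORE -- reading plane (c0 % 2) of A and writing plane ((c0 + 1) % 2).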
__global__ void kernel0_11(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 11;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 490;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
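// Register pipeline: __reg_<t>_<i> holds the stage-t partial result for one
// of three rotating row slots (i == row index mod 3); stage 0 is the raw
// input row, stages 1..10 are successive stencil applications.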
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
float __reg_7_0;
float __reg_7_1;
float __reg_7_2;
float __reg_8_0;
float __reg_8_1;
float __reg_8_2;
float __reg_9_0;
float __reg_9_1;
float __reg_9_2;
float __reg_10_0;
float __reg_10_1;
float __reg_10_2;
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
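// Double-buffered shared-memory row: __CALCSETUP flips the buffer, publishes
// each lane's centre value, and synchronizes so __SBREF can read the
// c2 -1/+1 neighbours of the middle row.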
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10);
const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11);
const AN5D_TYPE __storeValid = __writeValid11;
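// Each pipeline stage is valid one more halo away from the block edge in c2;
// lanes outside a stage's window pass the centre value through unchanged,
// and only lanes still valid after all 11 applications store to global memory.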
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC10(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid10) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
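// First block along c1: clamp at the top boundary by feeding row 0 (kept in
// __reg_10_0) as the "above" input of every stage while the pipeline ramps
// up, storing local rows 1..11.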
if (__c1Id == 0)
{
__LOAD(__reg_10_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_10_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_10_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_10_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_10_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_10_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_10_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_10_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_10_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_10_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_10_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(1, __reg_10_0, __reg_10_1, __reg_10_2);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(2, __reg_10_1, __reg_10_2, __reg_10_0);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(3, __reg_10_2, __reg_10_0, __reg_10_1);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(4, __reg_10_0, __reg_10_1, __reg_10_2);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(5, __reg_10_1, __reg_10_2, __reg_10_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(6, __reg_10_2, __reg_10_0, __reg_10_1);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(7, __reg_10_0, __reg_10_1, __reg_10_2);
__LOAD(__reg_0_1, 19);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(8, __reg_10_1, __reg_10_2, __reg_10_0);
__LOAD(__reg_0_2, 20);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(9, __reg_10_2, __reg_10_0, __reg_10_1);
__LOAD(__reg_0_0, 21);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(10, __reg_10_0, __reg_10_1, __reg_10_2);
__LOAD(__reg_0_1, 22);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(11, __reg_10_1, __reg_10_2, __reg_10_0);
}
else
{
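// Other blocks ramp up on halo rows instead: rows 0..22 of the overlapped
// extent are loaded and computed redundantly, and the first row this block
// stores is local row 11.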
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_1, 19);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 20);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_0, 21);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__LOAD(__reg_0_1, 22);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(11, __reg_10_1, __reg_10_2, __reg_10_0);
__DB_SWITCH(); __syncthreads();
}
__b_sb = __b_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
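// Bottom block: steady-state sweep, stopping while up to two rows remain so
// the boundary epilogue below can drain the pipeline.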
for (__h = 23; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h - 11, __reg_10_0, __reg_10_1, __reg_10_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(__h - 11, __reg_10_1, __reg_10_2, __reg_10_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
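// Bottom-boundary epilogue, mirroring the 12-application kernel above: the
// last loaded row is replayed as the clamped "below" input while the
// remaining stages drain.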
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h - 10, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(__h - 9, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 8, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h - 7, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(__h - 6, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 5, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h - 4, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_0_1);
__STORE(__h - 3, __reg_10_1, __reg_10_2, __reg_10_0);
__STORE(__h - 2, __reg_10_2, __reg_10_0, __reg_0_1);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h - 10, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(__h - 9, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 8, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h - 7, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(__h - 6, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 5, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h - 4, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(__h - 3, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_0_2);
__STORE(__h - 2, __reg_10_2, __reg_10_0, __reg_10_1);
__STORE(__h - 1, __reg_10_0, __reg_10_1, __reg_0_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h - 10, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(__h - 9, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 8, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h - 7, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(__h - 6, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 5, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h - 4, __reg_10_0, __reg_10_1, __reg_10_2);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(__h - 3, __reg_10_1, __reg_10_2, __reg_10_0);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 2, __reg_10_2, __reg_10_0, __reg_10_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_0_0);
__STORE(__h - 1, __reg_10_0, __reg_10_1, __reg_10_2);
__STORE(__h + 0, __reg_10_1, __reg_10_2, __reg_0_0);
}
}
else
{
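// Interior blocks: each iteration handles three rows at 11 buffer flips per
// row (10 __CALCs + 1 __STORE), leaving the double buffer out of phase, so
// the loop tail adds one more __DB_SWITCH to restore it.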
for (__h = 23; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h - 11, __reg_10_0, __reg_10_1, __reg_10_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(__h - 11, __reg_10_1, __reg_10_2, __reg_10_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h - 11, __reg_10_0, __reg_10_1, __reg_10_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(__h - 11, __reg_10_1, __reg_10_2, __reg_10_0);
__h++;
}
}
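/*
 * kernel0_10: one launch advances the 2D 5-point stencil defined by
 * __CALCEXPR by 10 fused time steps (__side0Len = 10). Each thread streams a
 * column of the c1 dimension through a register pipeline -- nine __CALC
 * stages (__reg_1_* .. __reg_9_*) plus one more stencil application inside
 * __STORE -- while c2 neighbours are exchanged through the double-buffered
 * shared array __b_sb_double. A holds two time copies, alternated via the
 * (__c0 % 2) index.
 *
 * Minimal host-side launch sketch (an assumption -- the real driver code is
 * generated elsewhere; the names below are illustrative only):
 *
 *   // side1Num = ceil((dimsize-2)/128), side2Num = ceil((dimsize-2)/492)
 *   // blockDim.x must equal __side2LenOl = 492 + 2*10 = 512
 *   // kernel0_10<<<side1Num * side2Num, 512>>>(A, dimsize, timestep, c0);
 *   // each launch advances c0 by __side0Len = 10 steps
 */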
__global__ void kernel0_10(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 10;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
float __reg_7_0;
float __reg_7_1;
float __reg_7_2;
float __reg_8_0;
float __reg_8_1;
float __reg_8_2;
float __reg_9_0;
float __reg_9_1;
float __reg_9_2;
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10);
const AN5D_TYPE __storeValid = __writeValid10;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
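/*
 * Per-row helpers:
 *   __LOAD(reg, h)       -- fetch row (__c1Pad2 - __halo1 + h) of the current
 *                           time copy of A into a register, guarded by
 *                           __loadValid.
 *   __CALCEXPR(r,a,b,c)  -- the 2D 5-point stencil: a and c are the c1
 *                           neighbours kept in registers; the c2 neighbours
 *                           of b are read from the shared buffer __b_sb.
 *   __CALCk(...)         -- apply pipeline stage k where __writeValidk holds,
 *                           i.e. where the k-deep c2 halo is still valid;
 *                           elsewhere pass the centre value through unchanged.
 *   __DB_SWITCH()        -- flip between the two halves of __b_sb_double so
 *                           one stage can publish a plane to shared memory
 *                           while the next still reads the previous one.
 *   __STORE(h, ...)      -- final stage: apply the stencil once more and
 *                           write row h to the opposite time copy of A.
 */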
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
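/* Prologue. The first c1 tile (__c1Id == 0) seeds the pipeline against the
   physical boundary: __reg_9_0 holds row 0 and is reused as the missing top
   operand of the first call of every stage. All other tiles simply stream
   rows 0..20 to fill the 10-stage pipeline before the first store. */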
if (__c1Id == 0)
{
__LOAD(__reg_9_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_9_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_9_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_9_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_9_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_9_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_9_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_9_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_9_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_9_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(1, __reg_9_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(2, __reg_9_1, __reg_9_2, __reg_9_0);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(3, __reg_9_2, __reg_9_0, __reg_9_1);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(4, __reg_9_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(5, __reg_9_1, __reg_9_2, __reg_9_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(6, __reg_9_2, __reg_9_0, __reg_9_1);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(7, __reg_9_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(8, __reg_9_1, __reg_9_2, __reg_9_0);
__LOAD(__reg_0_1, 19);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(9, __reg_9_2, __reg_9_0, __reg_9_1);
__LOAD(__reg_0_2, 20);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(10, __reg_9_0, __reg_9_1, __reg_9_2);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_1, 19);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 20);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(10, __reg_9_0, __reg_9_1, __reg_9_2);
__DB_SWITCH(); __syncthreads();
}
__b_sb = __b_sb_double + __blockSize * 1;
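/* Epilogue for the last c1 tile: stream until three rows before the tile's
   end, then drain the in-flight stages explicitly for the 0, 1 or 2 rows
   that remain, reusing the final row as the missing neighbour. Tiles that
   are not last fall through to the generic loop in the else arm. */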
if (__c1Id == __side1Num - 1)
{
for (__h = 21; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 10, __reg_9_2, __reg_9_0, __reg_9_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 10, __reg_9_0, __reg_9_1, __reg_9_2);
__h++;
}
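/* Pipeline drain: the branch taken depends on which of __h+0, __h+1 or
   __h+2 coincides with the tile end, so no load ever reads past the
   boundary. */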
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 9, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 8, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 7, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 6, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 5, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 4, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2);
__STORE(__h - 3, __reg_9_2, __reg_9_0, __reg_9_1);
__STORE(__h - 2, __reg_9_0, __reg_9_1, __reg_0_2);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 9, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 8, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 7, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 6, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 5, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 4, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 3, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0);
__STORE(__h - 2, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h - 1, __reg_9_1, __reg_9_2, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 9, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 8, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 7, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 6, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 5, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 4, __reg_9_1, __reg_9_2, __reg_9_0);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 3, __reg_9_2, __reg_9_0, __reg_9_1);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 2, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1);
__STORE(__h - 1, __reg_9_1, __reg_9_2, __reg_9_0);
__STORE(__h + 0, __reg_9_2, __reg_9_0, __reg_0_1);
}
}
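/* Interior c1 tiles: steady-state loop in strides of three rows, then an
   early return as soon as the overlapped extent __side1LenOl is exhausted. */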
else
{
for (__h = 21; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 10, __reg_9_2, __reg_9_0, __reg_9_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 10, __reg_9_0, __reg_9_1, __reg_9_2);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 10, __reg_9_2, __reg_9_0, __reg_9_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 10, __reg_9_0, __reg_9_1, __reg_9_2);
__h++;
}
}
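/*
 * kernel0_9: the same scheme with a 9-step fusion depth (__side0Len = 9):
 * eight __CALC stages plus the final store, a 128 x 494 tile, and
 * __side2LenOl = 494 + 2*9 = 512 threads per block.
 */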
__global__ void kernel0_9(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 9;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 494;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
float __reg_7_0;
float __reg_7_1;
float __reg_7_2;
float __reg_8_0;
float __reg_8_1;
float __reg_8_2;
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __storeValid = __writeValid9;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
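/* The prologue / steady-state / epilogue structure below mirrors kernel0_10,
   shifted down by one pipeline stage. */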
if (__c1Id == 0)
{
__LOAD(__reg_8_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_8_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_8_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_8_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_8_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_8_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_8_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_8_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_8_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(1, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(2, __reg_8_1, __reg_8_2, __reg_8_0);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(3, __reg_8_2, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(4, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(5, __reg_8_1, __reg_8_2, __reg_8_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(6, __reg_8_2, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(7, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(8, __reg_8_1, __reg_8_2, __reg_8_0);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(9, __reg_8_2, __reg_8_0, __reg_8_1);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_0, 18);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(9, __reg_8_2, __reg_8_0, __reg_8_1);
}
__b_sb = __b_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 19; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 9, __reg_8_2, __reg_8_0, __reg_8_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 8, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 7, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 6, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 5, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 4, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0);
__STORE(__h - 3, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 2, __reg_8_1, __reg_8_2, __reg_0_0);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 8, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 7, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 6, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 5, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 4, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 3, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1);
__STORE(__h - 2, __reg_8_1, __reg_8_2, __reg_8_0);
__STORE(__h - 1, __reg_8_2, __reg_8_0, __reg_0_1);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 8, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 7, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 6, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 5, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 4, __reg_8_2, __reg_8_0, __reg_8_1);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 3, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 2, __reg_8_1, __reg_8_2, __reg_8_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2);
__STORE(__h - 1, __reg_8_2, __reg_8_0, __reg_8_1);
__STORE(__h + 0, __reg_8_0, __reg_8_1, __reg_0_2);
}
}
else
{
for (__h = 19; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 9, __reg_8_2, __reg_8_0, __reg_8_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 9, __reg_8_2, __reg_8_0, __reg_8_1);
__h++;
}
}
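/*
 * kernel0_8: apparently auto-generated (AN5D-style) temporally blocked code;
 * the lines above close out the analogous 9-step variant. This kernel fuses
 * __side0Len = 8 time steps of the 2D 5-point stencil defined by __CALCEXPR
 * below. Each thread streams down the c1 dimension, keeping three consecutive
 * rows per time level in registers (__reg_<level>_<slot>) and exchanging
 * c2-neighbors through the double-buffered shared-memory row __b_sb. Stages
 * __CALC1..__CALC7 advance intermediate time levels over a region that
 * shrinks by one halo per step (__writeValid1..7); __STORE applies the
 * eighth step and writes into the opposite time plane of A.
 */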
__global__ void kernel0_8(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
float __reg_7_0;
float __reg_7_1;
float __reg_7_2;
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __storeValid = __writeValid8;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
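/* __CALCEXPR encodes one time step; per interior point it is equivalent to
 *   A[t+1][c1][c2] = (5.1f * A[t][c1-1][c2] + 12.1f * A[t][c1][c2-1]
 *                   + 15.0f * A[t][c1][c2]  + 12.2f * A[t][c1][c2+1]
 *                   + 5.2f  * A[t][c1+1][c2]) / 118;
 * where __a/__b/__c hold rows c1-1/c1/c1+1 and __b_sb holds the center row
 * so each thread can read its c2-1 and c2+1 neighbors. __CALCSETUP flips the
 * shared-memory buffer (__DB_SWITCH) before publishing the center row,
 * apparently so a single __syncthreads() per stage suffices. */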
if (__c1Id == 0)
{
__LOAD(__reg_7_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_7_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_7_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_7_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_7_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_7_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_7_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_7_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(1, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(2, __reg_7_1, __reg_7_2, __reg_7_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(3, __reg_7_2, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(4, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(5, __reg_7_1, __reg_7_2, __reg_7_0);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(6, __reg_7_2, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(7, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(8, __reg_7_1, __reg_7_2, __reg_7_0);
}
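/* Tiles that do not start at the top boundary: prime the pipeline from
 * redundantly loaded overlap rows instead of pinning row 0, so the first
 * valid store of this tile only happens at relative row 8. */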
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(8, __reg_7_1, __reg_7_2, __reg_7_0);
}
__b_sb = __b_sb_double + __blockSize * 0; /* restart on buffer 0 for the streaming loop */
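/* Last tile along c1: after the steady loop, drain the in-flight time levels
 * with one of three epilogues, selected by how many input rows remain. */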
if (__c1Id == __side1Num - 1)
{
for (__h = 17; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1);
__STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0);
__STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_0_1);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2);
__STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_7_1);
__STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_0_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_7_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0);
__STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h + 0, __reg_7_1, __reg_7_2, __reg_0_0);
}
}
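/* Tiles away from the bottom boundary need no drain epilogue: stream until
 * __h reaches the overlapped tile height __side1LenOl, returning early. */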
else
{
for (__h = 17; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0);
__h++;
}
}
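/*
 * kernel0_7: same generated pipeline as kernel0_8 above, specialized to
 * __side0Len = 7 fused time steps (__CALC1..__CALC6 plus __STORE).
 */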
__global__ void kernel0_7(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 498;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __storeValid = __writeValid7;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_6_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_6_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_6_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_6_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_6_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_6_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_6_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(1, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(2, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(3, __reg_6_2, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(5, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(6, __reg_6_2, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(7, __reg_6_0, __reg_6_1, __reg_6_2);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_1, 13);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 14);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(7, __reg_6_0, __reg_6_1, __reg_6_2);
__DB_SWITCH(); __syncthreads();
}
__b_sb = __b_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 15; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2);
__STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_0_2);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1);
__STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_0);
__STORE(__h + 0, __reg_6_2, __reg_6_0, __reg_0_1);
}
}
else
{
for (__h = 15; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
}
}
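/*
 * kernel0_6: same generated pipeline, specialized to __side0Len = 6 fused
 * time steps (__CALC1..__CALC5 plus __STORE).
 */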
__global__ void kernel0_6(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __storeValid = __writeValid6;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_5_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_5_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_5_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_5_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_5_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_5_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(1, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(2, __reg_5_1, __reg_5_2, __reg_5_0);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(3, __reg_5_2, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(5, __reg_5_1, __reg_5_2, __reg_5_0);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(6, __reg_5_2, __reg_5_0, __reg_5_1);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 12);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(6, __reg_5_2, __reg_5_0, __reg_5_1);
__DB_SWITCH(); __syncthreads();
}
__b_sb = __b_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 13; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(__h - 6, __reg_5_1, __reg_5_2, __reg_5_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1);
__h++;
}
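/* Pipeline drain for the bottom c1 tile: the remaining 0-2 rows are
 * handled by the else-if chain below; the generator emits "if (0) {}"
 * so that every remainder case can be a uniform "else if". */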
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_0_0);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_0);
__STORE(__h - 1, __reg_5_2, __reg_5_0, __reg_0_1);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2);
__STORE(__h - 1, __reg_5_2, __reg_5_0, __reg_5_1);
__STORE(__h + 0, __reg_5_0, __reg_5_1, __reg_0_2);
}
}
else
{
for (__h = 13; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(__h - 6, __reg_5_1, __reg_5_2, __reg_5_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(__h - 6, __reg_5_1, __reg_5_2, __reg_5_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1);
__h++;
}
}
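/* kernel0_5: the same streaming stencil, but fusing 5 time steps per
 * launch (__side0Len = 5). The halo overlap is 5 cells per side, so the
 * useful tile width __side2Len is 502 (502 + 2*5 = 512 threads/block). */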
__global__ void kernel0_5(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 502;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __storeValid = __writeValid5;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_4_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_4_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_4_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_4_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_4_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(1, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(3, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(5, __reg_4_1, __reg_4_2, __reg_4_0);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(5, __reg_4_1, __reg_4_2, __reg_4_0);
}
__b_sb = __b_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 11; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_0_1);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(__h - 1, __reg_4_0, __reg_4_1, __reg_0_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__STORE(__h - 1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h + 0, __reg_4_1, __reg_4_2, __reg_0_0);
}
}
else
{
for (__h = 11; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0);
__h++;
}
}
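/* kernel0_4: 4 fused time steps (__side0Len = 4); overlap of 4 cells per
 * side gives __side2Len = 504 (504 + 2*4 = 512 threads/block). */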
__global__ void kernel0_4(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __storeValid = __writeValid4;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_3_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_3_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_3_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_3_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(3, __reg_3_2, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(4, __reg_3_0, __reg_3_1, __reg_3_2);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(4, __reg_3_0, __reg_3_1, __reg_3_2);
}
__b_sb = __b_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_0_2);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 1, __reg_3_1, __reg_3_2, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__STORE(__h - 1, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h + 0, __reg_3_2, __reg_3_0, __reg_0_1);
}
}
else
{
for (__h = 9; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
}
}
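/* kernel0_3: 3 fused time steps (__side0Len = 3); overlap of 3 cells per
 * side gives __side2Len = 506 (506 + 2*3 = 512 threads/block). */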
__global__ void kernel0_3(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 506;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __storeValid = __writeValid3;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_2_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_2_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(2, __reg_2_1, __reg_2_2, __reg_2_0);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(3, __reg_2_2, __reg_2_0, __reg_2_1);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(3, __reg_2_2, __reg_2_0, __reg_2_1);
__DB_SWITCH(); __syncthreads();
}
__b_sb = __b_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 7; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_0_0);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(__h - 1, __reg_2_2, __reg_2_0, __reg_0_1);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__STORE(__h - 1, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h + 0, __reg_2_0, __reg_2_1, __reg_0_2);
}
}
else
{
for (__h = 7; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1);
__h++;
}
}
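/* kernel0_2: 2 fused time steps (__side0Len = 2); only one intermediate
 * register stage (__reg_1_*) is needed, and __side2Len = 508
 * (508 + 2*2 = 512 threads/block). */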
__global__ void kernel0_2(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __storeValid = __writeValid2;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_1_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_1_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__STORE(1, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__STORE(2, __reg_1_1, __reg_1_2, __reg_1_0);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__STORE(2, __reg_1_1, __reg_1_2, __reg_1_0);
__DB_SWITCH(); __syncthreads();
}
__b_sb = __b_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_0_1);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(__h - 1, __reg_1_0, __reg_1_1, __reg_0_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__STORE(__h - 1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h + 0, __reg_1_1, __reg_1_2, __reg_0_0);
}
}
else
{
for (__h = 5; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0);
__h++;
}
}
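/* kernel0_1: a single time step per launch (__side0Len = 1). No
 * intermediate pipeline stages remain: each output row is computed
 * directly from the three freshly loaded input rows, with
 * __side2Len = 510 (510 + 2*1 = 512 threads/block). */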
__global__ void kernel0_1(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 510;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __storeValid = __writeValid1;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((((((5.1f * (__REGREF(__a, 0))) + (12.1f * (__SBREF(__b_sb, -1)))) + (15.0f * (__REGREF(__b, 0)))) + (12.2f * (__SBREF(__b_sb, 1)))) + (5.2f * (__REGREF(__c, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__STORE(1, __reg_0_0, __reg_0_1, __reg_0_2);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__STORE(1, __reg_0_0, __reg_0_1, __reg_0_2);
}
__b_sb = __b_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 3; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_0, __h);
__STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0);
__h++;
__LOAD(__reg_0_1, __h);
__STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1);
__h++;
__LOAD(__reg_0_2, __h);
__STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h + 0, __reg_0_2, __reg_0_0, __reg_0_1);
}
}
else
{
for (__h = 3; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_0, __h);
__STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0);
__h++;
__LOAD(__reg_0_1, __h);
__STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1);
__h++;
__LOAD(__reg_0_2, __h);
__STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
}
}
|
09343b8b5ab3f8a22c31601e919a94da15c559e0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "AddVector.h"
#include <iostream>
#include "Device.h"
using std::cout;
using std::endl;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
extern __global__ void addVector(float* ptrDevV1, float* ptrDevV2, float* ptrDevW,int n);
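/* addVector is compiled in a separate unit; only its declaration is
 * visible here. A minimal sketch of such a kernel, assuming a flattened
 * grid-stride loop over the 2D launch configured below (an illustration,
 * not the project's actual implementation):
 *
 *   __global__ void addVector(float* v1, float* v2, float* w, int n)
 *   {
 *       // total threads in the whole (2D grid x 2D block) launch
 *       const int nbThreads = gridDim.x * gridDim.y * blockDim.x * blockDim.y;
 *       // unique linear id of this thread across the launch
 *       int tid = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x * blockDim.y
 *               + threadIdx.y * blockDim.x + threadIdx.x;
 *       while (tid < n)
 *       {
 *           w[tid] = v1[tid] + v2[tid];
 *           tid += nbThreads; // grid-stride step handles n > nbThreads
 *       }
 *   }
 */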
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Constructor *|
\*-------------------------------------*/
AddVector::AddVector(float* ptrV1, float* ptrV2, float* ptrW, int n) :
ptrV1(ptrV1), ptrV2(ptrV2), ptrW(ptrW), n(n)
{
this->sizeOctet = n * sizeof(float); // size in bytes
// MM
{
// MM (malloc Device)
{
HANDLE_ERROR(hipMalloc(&ptrDevV1, sizeOctet));
// TODO ptrV2
HANDLE_ERROR(hipMalloc(&ptrDevV2, sizeOctet));
// TODO ptrW
HANDLE_ERROR(hipMalloc(&ptrDevW, sizeOctet));
}
// MM (memset Device)
{
HANDLE_ERROR(hipMemset(ptrDevW, 0, sizeOctet));
}
// MM (copy Host->Device)
{
HANDLE_ERROR(hipMemcpy(ptrDevV1, ptrV1, sizeOctet, hipMemcpyHostToDevice));
// TODO ptrV2
HANDLE_ERROR(hipMemcpy(ptrDevV2, ptrV2, sizeOctet, hipMemcpyHostToDevice));
}
Device::lastCudaError("AddVector MM (end allocation)"); // temp debug
}
// Grid
{
this->dg = dim3(16, 2, 1); // arbitrary choice, to be tuned per GPU
this->db = dim3(32, 4, 1); // arbitrary choice, to be tuned per GPU
Device::gridHeuristic(dg, db);
}
}
AddVector::~AddVector(void)
{
// MM (device free)
{
HANDLE_ERROR(hipFree(ptrDevV1));
// TODO ptrV2
HANDLE_ERROR(hipFree(ptrDevV2));
// TODO ptrW
HANDLE_ERROR(hipFree(ptrDevW));
Device::lastCudaError("AddVector MM (end deallocation)"); // temp debug
}
}
/*--------------------------------------*\
|* Method *|
\*-------------------------------------*/
void AddVector::run()
{
Device::lastCudaError("addVecteur (before)"); // temp debug
hipLaunchKernelGGL(( addVector), dim3(dg),dim3(db), 0, 0, ptrDevV1, ptrDevV2, ptrDevW, n); // asynchronous launch
Device::lastCudaError("addVecteur (after)"); // temp debug
Device::synchronize(); // temporary: only needed to flush device-side printf output
// MM (Device -> Host)
{
HANDLE_ERROR(hipMemcpy(ptrW, ptrDevW, sizeOctet, hipMemcpyDeviceToHost)); // implicit synchronization barrier
}
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
09343b8b5ab3f8a22c31601e919a94da15c559e0.cu
|
#include "AddVector.h"
#include <iostream>
#include "Device.h"
using std::cout;
using std::endl;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
extern __global__ void addVector(float* ptrDevV1, float* ptrDevV2, float* ptrDevW,int n);
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Constructor *|
\*-------------------------------------*/
AddVector::AddVector(float* ptrV1, float* ptrV2, float* ptrW, int n) :
ptrV1(ptrV1), ptrV2(ptrV2), ptrW(ptrW), n(n)
{
this->sizeOctet = n * sizeof(float); // size in bytes
// MM
{
// MM (malloc Device)
{
HANDLE_ERROR(cudaMalloc(&ptrDevV1, sizeOctet));
// TODO ptrV2
HANDLE_ERROR(cudaMalloc(&ptrDevV2, sizeOctet));
// TODO ptrW
HANDLE_ERROR(cudaMalloc(&ptrDevW, sizeOctet));
}
// MM (memset Device)
{
HANDLE_ERROR(cudaMemset(ptrDevW, 0, sizeOctet));
}
// MM (copy Host->Device)
{
HANDLE_ERROR(cudaMemcpy(ptrDevV1, ptrV1, sizeOctet, cudaMemcpyHostToDevice));
// TODO ptrV2
HANDLE_ERROR(cudaMemcpy(ptrDevV2, ptrV2, sizeOctet, cudaMemcpyHostToDevice));
}
Device::lastCudaError("AddVector MM (end allocation)"); // temp debug
}
// Grid
{
this->dg = dim3(16, 2, 1); // arbitrary choice, to be tuned for the GPU
this->db = dim3(32, 4, 1); // arbitrary choice, to be tuned for the GPU
Device::gridHeuristic(dg, db);
}
}
AddVector::~AddVector(void)
{
//MM (device free)
{
HANDLE_ERROR(cudaFree(ptrDevV1));
// TODO ptrV2
HANDLE_ERROR(cudaFree(ptrDevV2));
// TODO ptrW
HANDLE_ERROR(cudaFree(ptrDevW));
Device::lastCudaError("AddVector MM (end deallocation)"); // temp debug
}
}
/*--------------------------------------*\
|* Method *|
\*-------------------------------------*/
void AddVector::run()
{
Device::lastCudaError("addVecteur (before)"); // temp debug
addVector<<<dg,db>>>(ptrDevV1, ptrDevV2, ptrDevW, n); // asynchronous
Device::lastCudaError("addVecteur (after)"); // temp debug
Device::synchronize(); // Temp, only for printf in GPU
// MM (Device -> Host)
{
HANDLE_ERROR(cudaMemcpy(ptrW, ptrDevW, sizeOctet, cudaMemcpyDeviceToHost)); // implicit synchronization barrier
}
}
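/*
 * Usage sketch (illustration only): how a caller might drive this wrapper.
 * main(), n and the host vectors are hypothetical; HANDLE_ERROR and Device
 * come from the included headers, and <vector> would also be needed.
 *
 * int main()
 *     {
 *     const int n = 1 << 20;
 *     std::vector<float> v1(n, 1.f), v2(n, 2.f), w(n, 0.f);
 *     AddVector adder(v1.data(), v2.data(), w.data(), n); // allocates device buffers, copies H->D
 *     adder.run(); // launches the kernel, copies w back D->H
 *     // expect w[i] == 3.f for every i
 *     return 0;
 *     }
 */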
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
0a6d8d76b15393e2cad9d7c0e9752198a8e15c98.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include <cfloat>
// #include "thrust/device_vector.h"
#include "caffe/layers/norm_conv_layer.hpp"
// #include "caffe/util/math_functions.hpp"
// #include "caffe/util/im2dist.hpp"
namespace caffe {
#ifndef CPU_ONLY
template <typename Dtype>
__global__ void kernel_channel_max(const int num, const int channels,
const int spatial_dim, const Dtype* data, Dtype* out) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype maxval = -FLT_MAX;
for (int c = 0; c < channels; ++c) {
maxval = max(data[(n * channels + c) * spatial_dim + s], maxval);
}
out[index] = maxval;
}
}
template <typename Dtype>
__global__ void kernel_channel_subtract(const int count,
const int num, const int channels,
const int spatial_dim, const Dtype* channel_max, Dtype* data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] -= channel_max[n * spatial_dim + s];
}
}
template <typename Dtype>
__global__ void kernel_exp(const int count, const Dtype* data, Dtype* out) {
CUDA_KERNEL_LOOP(index, count) {
out[index] = exp(data[index]);
}
}
template <typename Dtype>
__global__ void kernel_channel_sum(const int num, const int channels,
const int spatial_dim, const Dtype* data, Dtype* channel_sum) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype sum = 0;
for (int c = 0; c < channels; ++c) {
sum += data[(n * channels + c) * spatial_dim + s];
}
channel_sum[index] = sum;
}
}
template <typename Dtype>
__global__ void kernel_channel_div(const int count,
const int num, const int channels,
const int spatial_dim, const Dtype* channel_sum, Dtype* data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] /= channel_sum[n * spatial_dim + s];
}
}
template <typename Dtype>
__global__ void kernel_channel_dot(const int num, const int channels,
const int spatial_dim, const Dtype* data_1, const Dtype* data_2,
Dtype* channel_dot) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype dot = 0;
for (int c = 0; c < channels; ++c) {
dot += (data_1[(n * channels + c) * spatial_dim + s]
* data_2[(n * channels + c) * spatial_dim + s]);
}
channel_dot[index] = dot;
}
}
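// Reference note (illustration, not used by the layer): the channel kernels
// above (max, subtract, exp, sum, div) compose a numerically stable softmax
// over the channels axis for each (n, s) column. A plain CPU equivalent
// under the same (num, channels, spatial_dim) layout, kept here only as
// documentation:
//
// template <typename Dtype>
// void channel_softmax_cpu(int num, int channels, int spatial_dim,
//                          const Dtype* in, Dtype* out) {
//   for (int n = 0; n < num; ++n) {
//     for (int s = 0; s < spatial_dim; ++s) {
//       Dtype maxval = -FLT_MAX;
//       for (int c = 0; c < channels; ++c)
//         maxval = std::max(maxval, in[(n * channels + c) * spatial_dim + s]);
//       Dtype sum = 0;
//       for (int c = 0; c < channels; ++c) {
//         const int idx = (n * channels + c) * spatial_dim + s;
//         out[idx] = std::exp(in[idx] - maxval);
//         sum += out[idx];
//       }
//       for (int c = 0; c < channels; ++c)
//         out[(n * channels + c) * spatial_dim + s] /= sum;
//     }
//   }
// }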
template <typename Dtype>
void NormConvLayer<Dtype>::norm_weight_gpu_gemm(const Dtype* output, Dtype* weights) {
// col_buffer_ and soft_col_buffer_ are assumed already filled by prep_buffers_gpu
const int emb_count = emb_col_buffer_.count();
// multiply the two
for (int c=0; c < conv_in_channels_; ++c) {
caffe_gpu_mul(emb_count,
soft_col_buffer_.gpu_data(),
col_buffer_.gpu_data() + c * emb_count,
res_col_buffer_.mutable_gpu_data() + c * emb_count);
}
// gemm into weights
for (int g = 0; g < group_; ++g) {
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, conv_out_channels_ / group_,
kernel_dim_, conv_out_spatial_dim_,
(Dtype)1., output + output_offset_ * g,
res_col_buffer_.gpu_data() + col_offset_ * g,
(Dtype)1., weights + weight_offset_ * g);
}
}
template <typename Dtype>
void NormConvLayer<Dtype>::norm_forward_gpu_gemm(const Dtype* weights,
Dtype* output, bool skip_im2col) {
// col_buffer_ and soft_col_buffer_ are assumed already filled by prep_buffers_gpu
const int emb_count = soft_col_buffer_.count();
// multiply the two
for (int c=0; c < conv_in_channels_; ++c) {
caffe_gpu_mul(emb_count,
soft_col_buffer_.gpu_data(),
col_buffer_.gpu_data() + c * emb_count,
res_col_buffer_.mutable_gpu_data() + c * emb_count);
}
// gemm into output
for (int g = 0; g < group_; ++g) {
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, conv_out_channels_ /
group_, conv_out_spatial_dim_, kernel_dim_,
(Dtype)1., weights + weight_offset_ * g,
res_col_buffer_.mutable_gpu_data() + col_offset_ * g,
(Dtype)0., output + output_offset_ * g);
}
}
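// In math terms (informal summary): with D the patchwise embedding
// distances from im2dist and s the learned scale, prep_buffers_gpu below
// fills soft_col_buffer_ with w = softmax(-s * D) per output location, and
// this routine computes output = W * (w .* im2col(img)) -- an ordinary
// convolution whose im2col columns are reweighted by the normalized mask.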
template <typename Dtype>
void NormConvLayer<Dtype>::norm_backward_gpu_img_gemm(const Dtype* top_diff,
const Dtype* weights, Dtype* input_img) {
// col_buffer_ and soft_col_buffer_ are assumed already filled by prep_buffers_gpu
const int emb_count = emb_col_buffer_.count();
// gemm into res_col_buffer_
for (int g = 0; g < group_; ++g) {
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, kernel_dim_,
conv_out_spatial_dim_, conv_out_channels_ / group_,
(Dtype)1., weights + weight_offset_ * g, top_diff + output_offset_ * g,
(Dtype)0., res_col_buffer_.mutable_gpu_data() + col_offset_ * g);
}
// multiply by exp(scale(emb))
for (int c=0; c < conv_in_channels_; ++c) {
caffe_gpu_mul(emb_count,
soft_col_buffer_.gpu_data(),
res_col_buffer_.gpu_data() + c * emb_count,
res_col_buffer_.mutable_gpu_data() + c * emb_count);
}
// col2im
if (!is_1x1_ && !bottom_is_im2col_) {
conv_col2im_gpu(res_col_buffer_.gpu_data(), input_img);
}
}
template <typename Dtype>
void NormConvLayer<Dtype>::norm_backward_gpu_emb_gemm(const Dtype* top_diff,
const Dtype* weights, Dtype* emb_diff) {
// col_buffer_ and soft_col_buffer_ are assumed already filled by prep_buffers_gpu
const int img_count = res_col_buffer_.count();
const int emb_count = emb_col_buffer_.count();
// gemm into res_col_buffer_
for (int g = 0; g < group_; ++g) {
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, kernel_dim_,
conv_out_spatial_dim_, conv_out_channels_ / group_,
(Dtype)1., weights + weight_offset_ * g, top_diff + output_offset_ * g,
(Dtype)0., res_col_buffer_.mutable_gpu_data() + col_offset_ * g);
}
// mult by img
caffe_gpu_mul(img_count,
col_buffer_.gpu_data(),
res_col_buffer_.gpu_data(),
res_col_buffer_.mutable_gpu_data());
// sum down to one channel
for (int c=1; c < conv_in_channels_; ++c) {
caffe_gpu_axpy(emb_count,
Dtype(1),
res_col_buffer_.gpu_data() + c * emb_count,
res_col_buffer_.mutable_gpu_data());
}
Dtype* sum_data = sum_buffer_.mutable_gpu_data();
int mask_size = emb_col_buffer_.count(0,channel_axis_);
// Compute inner1d(top_diff, top_data) and subtract them from the bottom diff.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_dot<Dtype>), dim3(CAFFE_GET_BLOCKS(1 * conv_out_spatial_dim_)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, 1, mask_size, conv_out_spatial_dim_,
res_col_buffer_.gpu_data(), soft_col_buffer_.gpu_data(), sum_data);
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_subtract<Dtype>), dim3(CAFFE_GET_BLOCKS(emb_count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, emb_count, 1, mask_size, conv_out_spatial_dim_,
sum_data, res_col_buffer_.mutable_gpu_data());
// elementwise multiplication
caffe_gpu_mul<Dtype>(emb_count, res_col_buffer_.gpu_data(), soft_col_buffer_.gpu_data(), res_col_buffer_.mutable_gpu_data());
// // compute dot(top_diff, top_data) and subtract them from the bottom diff
// for (int k = 0; k < conv_out_spatial_dim_; ++k) {
// sum_data[k] = caffe_cpu_strided_dot<Dtype>(mask_size,
// res_col_buffer_.cpu_data() + k, conv_out_spatial_dim_,
// soft_col_buffer_.cpu_data() + k, conv_out_spatial_dim_);
// }
// // subtraction
// caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, mask_size, conv_out_spatial_dim_, 1,
// -1., sum_multiplier_.gpu_data(), sum_data, 1., res_col_buffer_.mutable_gpu_data());
// // elementwise multiplication
// caffe_gpu_mul(emb_count, res_col_buffer_.gpu_data(), soft_col_buffer_.gpu_data(), res_col_buffer_.mutable_gpu_data());
if (scale_term_) {
// scale the res
Dtype* scale_factor = this->blobs_[scale_ind_].get()->mutable_cpu_data();
caffe_gpu_scale(emb_count,
-scale_factor[0],
res_col_buffer_.gpu_data(),
res_col_buffer_.mutable_gpu_data());
}
// dist2im
if (!is_1x1_ && !bottom_is_im2col_) {
conv_dist2im_gpu(res_col_buffer_.gpu_data(),
diff_col_buffer_.gpu_data(),
emb_diff);
}
}
template <typename Dtype>
void NormConvLayer<Dtype>::backward_gpu_scale(Dtype* scale_diff,
const Dtype* weights, const Dtype* input_emb, const Dtype* top_diff) {
// col_buffer_ and soft_col_buffer_ are assumed already filled by prep_buffers_gpu
const int img_count = res_col_buffer_.count();
const int emb_count = emb_col_buffer_.count();
// gemm into res_col_buffer_
for (int g = 0; g < group_; ++g) {
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, kernel_dim_,
conv_out_spatial_dim_, conv_out_channels_ / group_,
(Dtype)1., weights + weight_offset_ * g, top_diff + output_offset_ * g,
(Dtype)0., res_col_buffer_.mutable_gpu_data() + col_offset_ * g);
}
// mult by img
caffe_gpu_mul(img_count,
col_buffer_.gpu_data(),
res_col_buffer_.gpu_data(),
res_col_buffer_.mutable_gpu_data());
// sum down to one channel
for (int c=1; c < conv_in_channels_; ++c) {
caffe_gpu_axpy(emb_count,
Dtype(1),
res_col_buffer_.gpu_data() + c * emb_count,
res_col_buffer_.mutable_gpu_data());
}
Dtype* sum_data = sum_buffer_.mutable_gpu_data();
int mask_size = emb_col_buffer_.count(0,channel_axis_);
// Compute inner1d(top_diff, top_data) and subtract them from the bottom diff.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_dot<Dtype>), dim3(CAFFE_GET_BLOCKS(1 * conv_out_spatial_dim_)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, 1, mask_size, conv_out_spatial_dim_,
res_col_buffer_.gpu_data(), soft_col_buffer_.gpu_data(), sum_data);
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_subtract<Dtype>), dim3(CAFFE_GET_BLOCKS(emb_count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, emb_count, 1, mask_size, conv_out_spatial_dim_,
sum_data, res_col_buffer_.mutable_gpu_data());
// elementwise multiplication
caffe_gpu_mul<Dtype>(emb_count, res_col_buffer_.gpu_data(), soft_col_buffer_.gpu_data(), res_col_buffer_.mutable_gpu_data());
// // compute dot(top_diff, top_data) and subtract them from the bottom diff
// for (int k = 0; k < conv_out_spatial_dim_; ++k) {
// sum_data[k] = caffe_cpu_strided_dot<Dtype>(mask_size,
// res_col_buffer_.cpu_data() + k, conv_out_spatial_dim_,
// soft_col_buffer_.cpu_data() + k, conv_out_spatial_dim_);
// }
// // subtraction
// caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, mask_size, conv_out_spatial_dim_, 1,
// -1., sum_multiplier_.gpu_data(), sum_data, 1., res_col_buffer_.mutable_gpu_data());
// // elementwise multiplication
// caffe_gpu_mul(emb_count, res_col_buffer_.gpu_data(), soft_col_buffer_.gpu_data(), res_col_buffer_.mutable_gpu_data());
// get a fresh embdist
conv_im2dist_gpu(input_emb, emb_col_buffer_.mutable_gpu_data(),
diff_col_buffer_.mutable_gpu_data());
// mult by embdist
caffe_gpu_mul(emb_count,
emb_col_buffer_.gpu_data(),
res_col_buffer_.gpu_data(),
res_col_buffer_.mutable_gpu_data());
// mult by scale sign
caffe_gpu_scale(emb_count, Dtype(-1), res_col_buffer_.gpu_data(), res_col_buffer_.mutable_gpu_data());
// add it up
caffe_gpu_gemv<Dtype>(CblasNoTrans, 1, emb_count, 1.,
res_col_buffer_.gpu_data(), sum_multiplier_.gpu_data(), 1., scale_diff);
}
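// Informal note on the reduction above: with w = softmax(-s * D), the chain
// rule gives dL/ds = sum_k (dL/d pre-softmax)_k * (-D_k). The softmax-
// Jacobian steps leave dL/d pre-softmax in res_col_buffer_, the fresh
// im2dist supplies D, caffe_gpu_scale applies the minus sign, and the gemv
// against sum_multiplier_ performs the final sum into scale_diff.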
template <typename Dtype>
void NormConvLayer<Dtype>::prep_buffers_gpu(const Dtype* weights,
const Dtype* input_img, const Dtype* input_emb) {
const int emb_count = emb_col_buffer_.count();
// get fresh copies of these
conv_im2col_gpu(input_img, col_buffer_.mutable_gpu_data());
conv_im2dist_gpu(input_emb, emb_col_buffer_.mutable_gpu_data(),
diff_col_buffer_.mutable_gpu_data());
// for (int i=47600;i<47610;i++)
// LOG(ERROR) << "distemb[" << i << "] = " << emb_col_buffer_.cpu_data()[i];
// LOG(ERROR) << " <<< emb count = " << emb_col_buffer_.count();
// // for (int i=0;i<0+62500*13;i+=62500)
// for (int i=47600;i<47610;i++)
// LOG(ERROR) << "distemb[" << i << "] = " << emb_col_buffer_.cpu_data()[i];
// LOG(ERROR) << " <<< ";
// scale the embs
if (scale_term_) {
Dtype* scale_factor = this->blobs_[scale_ind_].get()->mutable_cpu_data();
caffe_gpu_scale(emb_count, -scale_factor[0],
emb_col_buffer_.gpu_data(),
emb_col_buffer_.mutable_gpu_data());
}
// // for (int i=0;i<0+62500*13;i+=62500)
// for (int i=47600;i<47610;i++)
// LOG(ERROR) << "scalemb[" << i << "] = " << emb_col_buffer_.cpu_data()[i];
// softmax...
Dtype* sum_data = sum_buffer_.mutable_gpu_data();
int mask_size = emb_col_buffer_.count(0,channel_axis_);
caffe_copy(emb_count, emb_col_buffer_.gpu_data(), soft_col_buffer_.mutable_gpu_data());
// We need to subtract the max to avoid numerical issues, compute the exp,
// and then normalize.
// compute max
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_max<Dtype>), dim3(CAFFE_GET_BLOCKS(1 * conv_out_spatial_dim_)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, 1, mask_size, conv_out_spatial_dim_, soft_col_buffer_.mutable_gpu_data(),
sum_data);
// for (int i=47600;i<47610;i++)
// LOG(ERROR) << "sum_data[" << i << "] = " << sum_buffer_.cpu_data()[i];
// subtract
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_subtract<Dtype>), dim3(CAFFE_GET_BLOCKS(emb_count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, emb_count, 1, mask_size, conv_out_spatial_dim_,
sum_data, soft_col_buffer_.mutable_gpu_data());
// for (int i=47600;i<47610;i++)
// LOG(ERROR) << "subemb[" << i << "] = " << soft_col_buffer_.cpu_data()[i];
// exponentiate
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_exp<Dtype>), dim3(CAFFE_GET_BLOCKS(emb_count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
emb_count, soft_col_buffer_.mutable_gpu_data(), soft_col_buffer_.mutable_gpu_data());
// for (int i=47600;i<47610;i++)
// LOG(ERROR) << "expemb[" << i << "] = " << soft_col_buffer_.cpu_data()[i];
// sum after exp
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_sum<Dtype>), dim3(CAFFE_GET_BLOCKS(1 * conv_out_spatial_dim_)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, 1, mask_size, conv_out_spatial_dim_, soft_col_buffer_.mutable_gpu_data(),
sum_data);
// for (int i=47600;i<47610;i++)
// LOG(ERROR) << "sum_data[" << i << "] = " << sum_buffer_.cpu_data()[i];
// divide
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_div<Dtype>), dim3(CAFFE_GET_BLOCKS(emb_count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, emb_count, 1, mask_size, conv_out_spatial_dim_,
sum_data, soft_col_buffer_.mutable_gpu_data());
// for (int i=47600;i<47610;i++)
// LOG(ERROR) << "divemb[" << i << "] = " << soft_col_buffer_.cpu_data()[i];
}
// template <typename Dtype>
// void NormConvLayer<Dtype>::norm_backward_gpu_all(const Dtype* top_diff,
// const Dtype* weights, const Dtype* input_img, const Dtype* input_emb,
// Dtype* weight_diff, Dtype* img_diff, Dtype* emb_diff, Dtype* scale_diff) {
// // doesn't work yet
// }
template <typename Dtype>
void NormConvLayer<Dtype>::forward_gpu_bias(Dtype* output,
const Dtype* bias) {
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_output_,
out_spatial_dim_, 1, (Dtype)1., bias, sum_multiplier_.gpu_data(),
(Dtype)1., output);
}
template <typename Dtype>
void NormConvLayer<Dtype>::backward_gpu_bias(Dtype* bias,
const Dtype* input) {
caffe_gpu_gemv<Dtype>(CblasNoTrans, num_output_, out_spatial_dim_, 1.,
input, sum_multiplier_.gpu_data(), 1., bias);
}
template <typename Dtype>
void NormConvLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* weight = this->blobs_[0]->gpu_data();
const Dtype* bottom_img = bottom[0]->gpu_data();
const Dtype* bottom_emb = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
for (int n = 0; n < num_; ++n) {
prep_buffers_gpu(weight,
bottom_img + n * bottom_dim_,
bottom_emb + n * emb_bottom_dim_);
norm_forward_gpu_gemm(weight,
top_data + n * top_dim_);
if (bias_term_) {
const Dtype* bias = this->blobs_[1]->gpu_data();
forward_gpu_bias(top_data + n * top_dim_, bias);
}
}
}
template <typename Dtype>
void NormConvLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* weight = this->blobs_[0]->gpu_data();
Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff();
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* bottom_img = bottom[0]->gpu_data();
const Dtype* bottom_emb = bottom[1]->gpu_data();
Dtype* bottom_img_diff = bottom[0]->mutable_gpu_diff();
Dtype* bottom_emb_diff = bottom[1]->mutable_gpu_diff();
// Bias gradient, if necessary.
if (bias_term_ && this->param_propagate_down_[1]) {
Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff();
for (int n = 0; n < num_; ++n) {
backward_gpu_bias(bias_diff, top_diff + n * top_dim_);
}
}
if (this->param_propagate_down_[0]
|| (scale_term_ && this->param_propagate_down_[scale_ind_])
|| propagate_down[0] || propagate_down[1]) {
for (int n = 0; n < num_; ++n) {
// commonly we will want to bprop to everything: weights, image, embeddings, and scale.
// we can save a bit of time doing these together.
// if (param_propagate_down_[0] && scale_term_ &&
// param_propagate_down_[scale_ind_] &&
// propagate_down[0] && propagate_down[1]) {
// Dtype* scale_diff = blobs_[scale_ind_]->mutable_cpu_diff();
// norm_backward_gpu_all(top_diff + n * top_dim_,
// weight,
// bottom_img + n * bottom_dim_,
// bottom_emb + n * emb_bottom_dim_,
// weight_diff,
// bottom_img_diff + n * bottom_dim_,
// bottom_emb_diff + n * emb_bottom_dim_,
// scale_diff);
// } else {
// all except scale need a fresh run of im2col and im2dist for data "n"
if (this->param_propagate_down_[0] || propagate_down[0] || propagate_down[1])
prep_buffers_gpu(weight,
bottom_img + n * bottom_dim_,
bottom_emb + n * emb_bottom_dim_);
// gradient w.r.t. weight. Note that we will accumulate diffs.
if (this->param_propagate_down_[0])
norm_weight_gpu_gemm(top_diff + n * top_dim_, weight_diff);
// gradient w.r.t. bottom data, if necessary.
if (propagate_down[0])
norm_backward_gpu_img_gemm(top_diff + n * top_dim_, weight,
bottom_img_diff + n * bottom_dim_);
if (propagate_down[1])
norm_backward_gpu_emb_gemm(top_diff + n * top_dim_, weight,
bottom_emb_diff + n * emb_bottom_dim_);
// gradient w.r.t. scale, if necessary
if (scale_term_ && this->param_propagate_down_[scale_ind_]) {
Dtype* scale_diff = this->blobs_[scale_ind_]->mutable_gpu_diff();
backward_gpu_scale(scale_diff, weight,
bottom_emb + n * emb_bottom_dim_,
top_diff + n * top_dim_);
}
}
// }
}
}
#endif
INSTANTIATE_LAYER_GPU_FUNCS(NormConvLayer);
} // namespace caffe
|
0a6d8d76b15393e2cad9d7c0e9752198a8e15c98.cu
|
#include <vector>
#include <cfloat>
// #include "thrust/device_vector.h"
#include "caffe/layers/norm_conv_layer.hpp"
// #include "caffe/util/math_functions.hpp"
// #include "caffe/util/im2dist.hpp"
namespace caffe {
#ifndef CPU_ONLY
template <typename Dtype>
__global__ void kernel_channel_max(const int num, const int channels,
const int spatial_dim, const Dtype* data, Dtype* out) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype maxval = -FLT_MAX;
for (int c = 0; c < channels; ++c) {
maxval = max(data[(n * channels + c) * spatial_dim + s], maxval);
}
out[index] = maxval;
}
}
template <typename Dtype>
__global__ void kernel_channel_subtract(const int count,
const int num, const int channels,
const int spatial_dim, const Dtype* channel_max, Dtype* data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] -= channel_max[n * spatial_dim + s];
}
}
template <typename Dtype>
__global__ void kernel_exp(const int count, const Dtype* data, Dtype* out) {
CUDA_KERNEL_LOOP(index, count) {
out[index] = exp(data[index]);
}
}
template <typename Dtype>
__global__ void kernel_channel_sum(const int num, const int channels,
const int spatial_dim, const Dtype* data, Dtype* channel_sum) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype sum = 0;
for (int c = 0; c < channels; ++c) {
sum += data[(n * channels + c) * spatial_dim + s];
}
channel_sum[index] = sum;
}
}
template <typename Dtype>
__global__ void kernel_channel_div(const int count,
const int num, const int channels,
const int spatial_dim, const Dtype* channel_sum, Dtype* data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] /= channel_sum[n * spatial_dim + s];
}
}
template <typename Dtype>
__global__ void kernel_channel_dot(const int num, const int channels,
const int spatial_dim, const Dtype* data_1, const Dtype* data_2,
Dtype* channel_dot) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype dot = 0;
for (int c = 0; c < channels; ++c) {
dot += (data_1[(n * channels + c) * spatial_dim + s]
* data_2[(n * channels + c) * spatial_dim + s]);
}
channel_dot[index] = dot;
}
}
template <typename Dtype>
void NormConvLayer<Dtype>::norm_weight_gpu_gemm(const Dtype* output, Dtype* weights) {
// col_buffer_ and soft_col_buffer_ are assumed already filled by prep_buffers_gpu
const int emb_count = emb_col_buffer_.count();
// multiply the two
for (int c=0; c < conv_in_channels_; ++c) {
caffe_gpu_mul(emb_count,
soft_col_buffer_.gpu_data(),
col_buffer_.gpu_data() + c * emb_count,
res_col_buffer_.mutable_gpu_data() + c * emb_count);
}
// gemm into weights
for (int g = 0; g < group_; ++g) {
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, conv_out_channels_ / group_,
kernel_dim_, conv_out_spatial_dim_,
(Dtype)1., output + output_offset_ * g,
res_col_buffer_.gpu_data() + col_offset_ * g,
(Dtype)1., weights + weight_offset_ * g);
}
}
template <typename Dtype>
void NormConvLayer<Dtype>::norm_forward_gpu_gemm(const Dtype* weights,
Dtype* output, bool skip_im2col) {
// col_buffer_ and soft_col_buffer_ are assumed already filled by prep_buffers_gpu
const int emb_count = soft_col_buffer_.count();
// multiply the two
for (int c=0; c < conv_in_channels_; ++c) {
caffe_gpu_mul(emb_count,
soft_col_buffer_.gpu_data(),
col_buffer_.gpu_data() + c * emb_count,
res_col_buffer_.mutable_gpu_data() + c * emb_count);
}
// gemm into output
for (int g = 0; g < group_; ++g) {
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, conv_out_channels_ /
group_, conv_out_spatial_dim_, kernel_dim_,
(Dtype)1., weights + weight_offset_ * g,
res_col_buffer_.mutable_gpu_data() + col_offset_ * g,
(Dtype)0., output + output_offset_ * g);
}
}
template <typename Dtype>
void NormConvLayer<Dtype>::norm_backward_gpu_img_gemm(const Dtype* top_diff,
const Dtype* weights, Dtype* input_img) {
// col_buffer_ and soft_col_buffer_ are assumed already filled by prep_buffers_gpu
const int emb_count = emb_col_buffer_.count();
// gemm into res_col_buffer_
for (int g = 0; g < group_; ++g) {
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, kernel_dim_,
conv_out_spatial_dim_, conv_out_channels_ / group_,
(Dtype)1., weights + weight_offset_ * g, top_diff + output_offset_ * g,
(Dtype)0., res_col_buffer_.mutable_gpu_data() + col_offset_ * g);
}
// multiply by exp(scale(emb))
for (int c=0; c < conv_in_channels_; ++c) {
caffe_gpu_mul(emb_count,
soft_col_buffer_.gpu_data(),
res_col_buffer_.gpu_data() + c * emb_count,
res_col_buffer_.mutable_gpu_data() + c * emb_count);
}
// col2im
if (!is_1x1_ && !bottom_is_im2col_) {
conv_col2im_gpu(res_col_buffer_.gpu_data(), input_img);
}
}
template <typename Dtype>
void NormConvLayer<Dtype>::norm_backward_gpu_emb_gemm(const Dtype* top_diff,
const Dtype* weights, Dtype* emb_diff) {
// col_buffer_ and soft_col_buffer_ are assumed already filled by prep_buffers_gpu
const int img_count = res_col_buffer_.count();
const int emb_count = emb_col_buffer_.count();
// gemm into res_col_buffer_
for (int g = 0; g < group_; ++g) {
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, kernel_dim_,
conv_out_spatial_dim_, conv_out_channels_ / group_,
(Dtype)1., weights + weight_offset_ * g, top_diff + output_offset_ * g,
(Dtype)0., res_col_buffer_.mutable_gpu_data() + col_offset_ * g);
}
// mult by img
caffe_gpu_mul(img_count,
col_buffer_.gpu_data(),
res_col_buffer_.gpu_data(),
res_col_buffer_.mutable_gpu_data());
// sum down to one channel
for (int c=1; c < conv_in_channels_; ++c) {
caffe_gpu_axpy(emb_count,
Dtype(1),
res_col_buffer_.gpu_data() + c * emb_count,
res_col_buffer_.mutable_gpu_data());
}
Dtype* sum_data = sum_buffer_.mutable_gpu_data();
int mask_size = emb_col_buffer_.count(0,channel_axis_);
// Compute inner1d(top_diff, top_data) and subtract them from the bottom diff.
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_dot<Dtype><<<CAFFE_GET_BLOCKS(1 * conv_out_spatial_dim_),
CAFFE_CUDA_NUM_THREADS>>>(1, mask_size, conv_out_spatial_dim_,
res_col_buffer_.gpu_data(), soft_col_buffer_.gpu_data(), sum_data);
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_subtract<Dtype><<<CAFFE_GET_BLOCKS(emb_count),
CAFFE_CUDA_NUM_THREADS>>>(emb_count, 1, mask_size, conv_out_spatial_dim_,
sum_data, res_col_buffer_.mutable_gpu_data());
// elementwise multiplication
caffe_gpu_mul<Dtype>(emb_count, res_col_buffer_.gpu_data(), soft_col_buffer_.gpu_data(), res_col_buffer_.mutable_gpu_data());
// // compute dot(top_diff, top_data) and subtract them from the bottom diff
// for (int k = 0; k < conv_out_spatial_dim_; ++k) {
// sum_data[k] = caffe_cpu_strided_dot<Dtype>(mask_size,
// res_col_buffer_.cpu_data() + k, conv_out_spatial_dim_,
// soft_col_buffer_.cpu_data() + k, conv_out_spatial_dim_);
// }
// // subtraction
// caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, mask_size, conv_out_spatial_dim_, 1,
// -1., sum_multiplier_.gpu_data(), sum_data, 1., res_col_buffer_.mutable_gpu_data());
// // elementwise multiplication
// caffe_gpu_mul(emb_count, res_col_buffer_.gpu_data(), soft_col_buffer_.gpu_data(), res_col_buffer_.mutable_gpu_data());
if (scale_term_) {
// scale the res
Dtype* scale_factor = this->blobs_[scale_ind_].get()->mutable_cpu_data();
caffe_gpu_scale(emb_count,
-scale_factor[0],
res_col_buffer_.gpu_data(),
res_col_buffer_.mutable_gpu_data());
}
// dist2im
if (!is_1x1_ && !bottom_is_im2col_) {
conv_dist2im_gpu(res_col_buffer_.gpu_data(),
diff_col_buffer_.gpu_data(),
emb_diff);
}
}
template <typename Dtype>
void NormConvLayer<Dtype>::backward_gpu_scale(Dtype* scale_diff,
const Dtype* weights, const Dtype* input_emb, const Dtype* top_diff) {
// col_buffer_ and soft_col_buffer_ are assumed already filled by prep_buffers_gpu
const int img_count = res_col_buffer_.count();
const int emb_count = emb_col_buffer_.count();
// gemm into res_col_buffer_
for (int g = 0; g < group_; ++g) {
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, kernel_dim_,
conv_out_spatial_dim_, conv_out_channels_ / group_,
(Dtype)1., weights + weight_offset_ * g, top_diff + output_offset_ * g,
(Dtype)0., res_col_buffer_.mutable_gpu_data() + col_offset_ * g);
}
// mult by img
caffe_gpu_mul(img_count,
col_buffer_.gpu_data(),
res_col_buffer_.gpu_data(),
res_col_buffer_.mutable_gpu_data());
// sum down to one channel
for (int c=1; c < conv_in_channels_; ++c) {
caffe_gpu_axpy(emb_count,
Dtype(1),
res_col_buffer_.gpu_data() + c * emb_count,
res_col_buffer_.mutable_gpu_data());
}
Dtype* sum_data = sum_buffer_.mutable_gpu_data();
int mask_size = emb_col_buffer_.count(0,channel_axis_);
// Compute inner1d(top_diff, top_data) and subtract them from the bottom diff.
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_dot<Dtype><<<CAFFE_GET_BLOCKS(1 * conv_out_spatial_dim_),
CAFFE_CUDA_NUM_THREADS>>>(1, mask_size, conv_out_spatial_dim_,
res_col_buffer_.gpu_data(), soft_col_buffer_.gpu_data(), sum_data);
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_subtract<Dtype><<<CAFFE_GET_BLOCKS(emb_count),
CAFFE_CUDA_NUM_THREADS>>>(emb_count, 1, mask_size, conv_out_spatial_dim_,
sum_data, res_col_buffer_.mutable_gpu_data());
// elementwise multiplication
caffe_gpu_mul<Dtype>(emb_count, res_col_buffer_.gpu_data(), soft_col_buffer_.gpu_data(), res_col_buffer_.mutable_gpu_data());
// // compute dot(top_diff, top_data) and subtract them from the bottom diff
// for (int k = 0; k < conv_out_spatial_dim_; ++k) {
// sum_data[k] = caffe_cpu_strided_dot<Dtype>(mask_size,
// res_col_buffer_.cpu_data() + k, conv_out_spatial_dim_,
// soft_col_buffer_.cpu_data() + k, conv_out_spatial_dim_);
// }
// // subtraction
// caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, mask_size, conv_out_spatial_dim_, 1,
// -1., sum_multiplier_.gpu_data(), sum_data, 1., res_col_buffer_.mutable_gpu_data());
// // elementwise multiplication
// caffe_gpu_mul(emb_count, res_col_buffer_.gpu_data(), soft_col_buffer_.gpu_data(), res_col_buffer_.mutable_gpu_data());
// get a fresh embdist
conv_im2dist_gpu(input_emb, emb_col_buffer_.mutable_gpu_data(),
diff_col_buffer_.mutable_gpu_data());
// mult by embdist
caffe_gpu_mul(emb_count,
emb_col_buffer_.gpu_data(),
res_col_buffer_.gpu_data(),
res_col_buffer_.mutable_gpu_data());
// mult by scale sign
caffe_gpu_scale(emb_count, Dtype(-1), res_col_buffer_.gpu_data(), res_col_buffer_.mutable_gpu_data());
// add it up
caffe_gpu_gemv<Dtype>(CblasNoTrans, 1, emb_count, 1.,
res_col_buffer_.gpu_data(), sum_multiplier_.gpu_data(), 1., scale_diff);
}
template <typename Dtype>
void NormConvLayer<Dtype>::prep_buffers_gpu(const Dtype* weights,
const Dtype* input_img, const Dtype* input_emb) {
const int emb_count = emb_col_buffer_.count();
// get fresh copies of these
conv_im2col_gpu(input_img, col_buffer_.mutable_gpu_data());
conv_im2dist_gpu(input_emb, emb_col_buffer_.mutable_gpu_data(),
diff_col_buffer_.mutable_gpu_data());
// for (int i=47600;i<47610;i++)
// LOG(ERROR) << "distemb[" << i << "] = " << emb_col_buffer_.cpu_data()[i];
// LOG(ERROR) << " <<< emb count = " << emb_col_buffer_.count();
// // for (int i=0;i<0+62500*13;i+=62500)
// for (int i=47600;i<47610;i++)
// LOG(ERROR) << "distemb[" << i << "] = " << emb_col_buffer_.cpu_data()[i];
// LOG(ERROR) << " <<< ";
// scale the embs
if (scale_term_) {
Dtype* scale_factor = this->blobs_[scale_ind_].get()->mutable_cpu_data();
caffe_gpu_scale(emb_count, -scale_factor[0],
emb_col_buffer_.gpu_data(),
emb_col_buffer_.mutable_gpu_data());
}
// // for (int i=0;i<0+62500*13;i+=62500)
// for (int i=47600;i<47610;i++)
// LOG(ERROR) << "scalemb[" << i << "] = " << emb_col_buffer_.cpu_data()[i];
// softmax...
Dtype* sum_data = sum_buffer_.mutable_gpu_data();
int mask_size = emb_col_buffer_.count(0,channel_axis_);
caffe_copy(emb_count, emb_col_buffer_.gpu_data(), soft_col_buffer_.mutable_gpu_data());
// We need to subtract the max to avoid numerical issues, compute the exp,
// and then normalize.
// compute max
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_max<Dtype><<<CAFFE_GET_BLOCKS(1 * conv_out_spatial_dim_),
CAFFE_CUDA_NUM_THREADS>>>(1, mask_size, conv_out_spatial_dim_, soft_col_buffer_.mutable_gpu_data(),
sum_data);
// for (int i=47600;i<47610;i++)
// LOG(ERROR) << "sum_data[" << i << "] = " << sum_buffer_.cpu_data()[i];
// subtract
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_subtract<Dtype><<<CAFFE_GET_BLOCKS(emb_count),
CAFFE_CUDA_NUM_THREADS>>>(emb_count, 1, mask_size, conv_out_spatial_dim_,
sum_data, soft_col_buffer_.mutable_gpu_data());
// for (int i=47600;i<47610;i++)
// LOG(ERROR) << "subemb[" << i << "] = " << soft_col_buffer_.cpu_data()[i];
// exponentiate
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_exp<Dtype><<<CAFFE_GET_BLOCKS(emb_count), CAFFE_CUDA_NUM_THREADS>>>(
emb_count, soft_col_buffer_.mutable_gpu_data(), soft_col_buffer_.mutable_gpu_data());
// for (int i=47600;i<47610;i++)
// LOG(ERROR) << "expemb[" << i << "] = " << soft_col_buffer_.cpu_data()[i];
// sum after exp
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_sum<Dtype><<<CAFFE_GET_BLOCKS(1 * conv_out_spatial_dim_),
CAFFE_CUDA_NUM_THREADS>>>(1, mask_size, conv_out_spatial_dim_, soft_col_buffer_.mutable_gpu_data(),
sum_data);
// for (int i=47600;i<47610;i++)
// LOG(ERROR) << "sum_data[" << i << "] = " << sum_buffer_.cpu_data()[i];
// divide
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_div<Dtype><<<CAFFE_GET_BLOCKS(emb_count),
CAFFE_CUDA_NUM_THREADS>>>(emb_count, 1, mask_size, conv_out_spatial_dim_,
sum_data, soft_col_buffer_.mutable_gpu_data());
// for (int i=47600;i<47610;i++)
// LOG(ERROR) << "divemb[" << i << "] = " << soft_col_buffer_.cpu_data()[i];
}
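// Informal summary: on exit soft_col_buffer_ holds w = softmax(-scale * D)
// taken column-wise over the mask axis, with D the im2dist output. The max
// subtraction above is the standard stability trick: for a column like
// (89, 88) a direct exp() would overflow single precision, whereas
// exponentiating the shifted values (0, -1) and normalizing by their sum
// yields the same softmax exactly.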
// template <typename Dtype>
// void NormConvLayer<Dtype>::norm_backward_gpu_all(const Dtype* top_diff,
// const Dtype* weights, const Dtype* input_img, const Dtype* input_emb,
// Dtype* weight_diff, Dtype* img_diff, Dtype* emb_diff, Dtype* scale_diff) {
// // doesn't work yet
// }
template <typename Dtype>
void NormConvLayer<Dtype>::forward_gpu_bias(Dtype* output,
const Dtype* bias) {
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_output_,
out_spatial_dim_, 1, (Dtype)1., bias, sum_multiplier_.gpu_data(),
(Dtype)1., output);
}
template <typename Dtype>
void NormConvLayer<Dtype>::backward_gpu_bias(Dtype* bias,
const Dtype* input) {
caffe_gpu_gemv<Dtype>(CblasNoTrans, num_output_, out_spatial_dim_, 1.,
input, sum_multiplier_.gpu_data(), 1., bias);
}
template <typename Dtype>
void NormConvLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* weight = this->blobs_[0]->gpu_data();
const Dtype* bottom_img = bottom[0]->gpu_data();
const Dtype* bottom_emb = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
for (int n = 0; n < num_; ++n) {
prep_buffers_gpu(weight,
bottom_img + n * bottom_dim_,
bottom_emb + n * emb_bottom_dim_);
norm_forward_gpu_gemm(weight,
top_data + n * top_dim_);
if (bias_term_) {
const Dtype* bias = this->blobs_[1]->gpu_data();
forward_gpu_bias(top_data + n * top_dim_, bias);
}
}
}
template <typename Dtype>
void NormConvLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* weight = this->blobs_[0]->gpu_data();
Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff();
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* bottom_img = bottom[0]->gpu_data();
const Dtype* bottom_emb = bottom[1]->gpu_data();
Dtype* bottom_img_diff = bottom[0]->mutable_gpu_diff();
Dtype* bottom_emb_diff = bottom[1]->mutable_gpu_diff();
// Bias gradient, if necessary.
if (bias_term_ && this->param_propagate_down_[1]) {
Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff();
for (int n = 0; n < num_; ++n) {
backward_gpu_bias(bias_diff, top_diff + n * top_dim_);
}
}
if (this->param_propagate_down_[0]
|| (scale_term_ && this->param_propagate_down_[scale_ind_])
|| propagate_down[0] || propagate_down[1]) {
for (int n = 0; n < num_; ++n) {
// commonly we will want to bprop to everything: weights, image, embeddings, and scale.
// we can save a bit of time doing these together.
// if (param_propagate_down_[0] && scale_term_ &&
// param_propagate_down_[scale_ind_] &&
// propagate_down[0] && propagate_down[1]) {
// Dtype* scale_diff = blobs_[scale_ind_]->mutable_cpu_diff();
// norm_backward_gpu_all(top_diff + n * top_dim_,
// weight,
// bottom_img + n * bottom_dim_,
// bottom_emb + n * emb_bottom_dim_,
// weight_diff,
// bottom_img_diff + n * bottom_dim_,
// bottom_emb_diff + n * emb_bottom_dim_,
// scale_diff);
// } else {
// all except scale need a fresh run of im2col and im2dist for data "n"
if (this->param_propagate_down_[0] || propagate_down[0] || propagate_down[1])
prep_buffers_gpu(weight,
bottom_img + n * bottom_dim_,
bottom_emb + n * emb_bottom_dim_);
// gradient w.r.t. weight. Note that we will accumulate diffs.
if (this->param_propagate_down_[0])
norm_weight_gpu_gemm(top_diff + n * top_dim_, weight_diff);
// gradient w.r.t. bottom data, if necessary.
if (propagate_down[0])
norm_backward_gpu_img_gemm(top_diff + n * top_dim_, weight,
bottom_img_diff + n * bottom_dim_);
if (propagate_down[1])
norm_backward_gpu_emb_gemm(top_diff + n * top_dim_, weight,
bottom_emb_diff + n * emb_bottom_dim_);
// gradient w.r.t. scale, if necessary
if (scale_term_ && this->param_propagate_down_[scale_ind_]) {
Dtype* scale_diff = this->blobs_[scale_ind_]->mutable_gpu_diff();
backward_gpu_scale(scale_diff, weight,
bottom_emb + n * emb_bottom_dim_,
top_diff + n * top_dim_);
}
}
// }
}
}
#endif
INSTANTIATE_LAYER_GPU_FUNCS(NormConvLayer);
} // namespace caffe
|
329d1c744f9acc528767e5bfc969875f3847b4f9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@author Mark Gates
@author Azzam Haidar
@generated from magmablas/zlacpy.cu normal z -> c, Tue Feb 9 16:05:28 2016
*/
#include "magma_internal.h"
// To deal with really large matrices, this launches multiple super blocks,
// each with up to 64K-1 x 64K-1 thread blocks, which is up to a 4194240 x 4194240 matrix with BLK=64.
// CUDA architecture 2.0 limits each grid dimension to 64K-1.
// Instances arose for vectors used by sparse matrices with M > 4194240, though N is small.
const magma_int_t max_blocks = 65535;
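// Worked example: with BLK_X = 64 a super block spans
// super_NB = max_blocks * BLK_X = 65535 * 64 = 4194240 rows or columns, so
// m = 10,000,000 rows require magma_ceildiv( m, super_NB ) = 3 super blocks
// along the row dimension.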
// BLK_X and BLK_Y need to be equal for claset_q to deal with diag & offdiag
// when looping over super blocks.
// Formerly, BLK_X and BLK_Y could be different.
#define BLK_X 64
#define BLK_Y BLK_X
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to claset, clacpy, clag2z, clag2z, cgeadd.
*/
static __device__
void clacpy_full_device(
int m, int n,
const magmaFloatComplex *dA, int ldda,
magmaFloatComplex *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column */
bool full = (iby + BLK_Y <= n);
/* do only rows inside matrix */
if ( ind < m ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
else {
// partial block-column
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
}
}
/*
Similar to clacpy_full, but updates only the diagonal and below.
Blocks that are fully above the diagonal exit immediately.
Code similar to claset, clacpy, zlat2c, clat2z.
*/
static __device__
void clacpy_lower_device(
int m, int n,
const magmaFloatComplex *dA, int ldda,
magmaFloatComplex *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (below diag) */
bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y));
/* do only rows inside matrix, and blocks not above diag */
if ( ind < m && ind + BLK_X > iby ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n && ind >= iby+j; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
}
}
/*
Similar to clacpy_full, but updates only the diagonal and above.
Blocks that are fully below the diagonal exit immediately.
Code similar to claset, clacpy, zlat2c, clat2z.
*/
static __device__
void clacpy_upper_device(
int m, int n,
const magmaFloatComplex *dA, int ldda,
magmaFloatComplex *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (above diag) */
bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby));
/* do only rows inside matrix, and blocks not below diag */
if ( ind < m && ind < iby + BLK_Y ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( ind <= iby+j ) {
dB[j*lddb] = dA[j*ldda];
}
}
}
}
}
//////////////////////////////////////////////////////////////////////////////////////
/*
kernel wrappers to call the device functions.
*/
__global__
void clacpy_full_kernel(
int m, int n,
const magmaFloatComplex *dA, int ldda,
magmaFloatComplex *dB, int lddb )
{
clacpy_full_device(m, n, dA, ldda, dB, lddb);
}
__global__
void clacpy_lower_kernel(
int m, int n,
const magmaFloatComplex *dA, int ldda,
magmaFloatComplex *dB, int lddb )
{
clacpy_lower_device(m, n, dA, ldda, dB, lddb);
}
__global__
void clacpy_upper_kernel(
int m, int n,
const magmaFloatComplex *dA, int ldda,
magmaFloatComplex *dB, int lddb )
{
clacpy_upper_device(m, n, dA, ldda, dB, lddb);
}
//////////////////////////////////////////////////////////////////////////////////////
/*
kernel wrappers to call the device functions for the batched routine.
*/
__global__
void clacpy_full_kernel_batched(
int m, int n,
magmaFloatComplex const * const *dAarray, int ldda,
magmaFloatComplex **dBarray, int lddb )
{
int batchid = blockIdx.z;
clacpy_full_device(m, n, dAarray[batchid], ldda, dBarray[batchid], lddb);
}
__global__
void clacpy_lower_kernel_batched(
int m, int n,
magmaFloatComplex const * const *dAarray, int ldda,
magmaFloatComplex **dBarray, int lddb )
{
int batchid = blockIdx.z;
clacpy_lower_device(m, n, dAarray[batchid], ldda, dBarray[batchid], lddb);
}
__global__
void clacpy_upper_kernel_batched(
int m, int n,
magmaFloatComplex const * const *dAarray, int ldda,
magmaFloatComplex **dBarray, int lddb )
{
int batchid = blockIdx.z;
clacpy_upper_device(m, n, dAarray[batchid], ldda, dBarray[batchid], lddb);
}
//////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
CLACPY copies all or part of a two-dimensional matrix dA to another
matrix dB.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA to be copied to dB.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
- = MagmaFull: All of the matrix dA
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
dA COMPLEX array, dimension (LDDA,N)
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[out]
dB COMPLEX array, dimension (LDDB,N)
The M-by-N matrix dB.
On exit, dB = dA in the locations specified by UPLO.
@param[in]
lddb INTEGER
The leading dimension of the array dB. LDDB >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_caux2
********************************************************************/
extern "C" void
magmablas_clacpy_q(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
magmaFloatComplex_const_ptr dA, magma_int_t ldda,
magmaFloatComplex_ptr dB, magma_int_t lddb,
magma_queue_t queue )
{
#define dA(i_, j_) (dA + (i_) + (j_)*ldda)
#define dB(i_, j_) (dB + (i_) + (j_)*lddb)
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( m == 0 || n == 0 ) {
return;
}
assert( BLK_X == BLK_Y );
const magma_int_t super_NB = max_blocks*BLK_X;
dim3 super_grid( magma_ceildiv( m, super_NB ), magma_ceildiv( n, super_NB ) );
dim3 threads( BLK_X, 1 );
dim3 grid;
magma_int_t mm, nn;
if ( uplo == MagmaLower ) {
for( unsigned int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( unsigned int j=0; j < super_grid.y && j <= i; ++j ) { // from left to diagonal
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
if ( i == j ) { // diagonal super block
hipLaunchKernelGGL(( clacpy_lower_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb );
}
else { // off diagonal super block
hipLaunchKernelGGL(( clacpy_full_kernel) , dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb );
}
}
}
}
else if ( uplo == MagmaUpper ) {
for( unsigned int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( unsigned int j=i; j < super_grid.y; ++j ) { // from diagonal to right
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
if ( i == j ) { // diagonal super block
hipLaunchKernelGGL(( clacpy_upper_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb );
}
else { // off diagonal super block
hipLaunchKernelGGL(( clacpy_full_kernel) , dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb );
}
}
}
}
else {
// TODO: use hipMemcpy or hipMemcpy2D ?
for( unsigned int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( unsigned int j=0; j < super_grid.y; ++j ) { // full row
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
hipLaunchKernelGGL(( clacpy_full_kernel) , dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb );
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
CLACPY_BATCHED copies all or part of each two-dimensional matrix
dAarray[i] to matrix dBarray[i], for 0 <= i < batchCount.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of each matrix dA to be copied to dB.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
Otherwise: All of each matrix dA
@param[in]
m INTEGER
The number of rows of each matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of each matrix dA. N >= 0.
@param[in]
dAarray COMPLEX* array, dimension (batchCount)
Array of pointers to the matrices dA, where each dA is of dimension (LDDA,N).
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
@param[in]
ldda INTEGER
The leading dimension of each array dA. LDDA >= max(1,M).
@param[out]
dBarray COMPLEX* array, dimension (batchCount)
Array of pointers to the matrices dB, where each dB is of dimension (LDDB,N).
The M-by-N matrix dB.
On exit, dB = dA in the locations specified by UPLO.
@param[in]
lddb INTEGER
The leading dimension of each array dB. LDDB >= max(1,M).
@param[in]
batchCount Number of matrices in dAarray and dBarray.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_caux2
********************************************************************/
extern "C" void
magmablas_clacpy_batched(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
magmaFloatComplex_const_ptr const dAarray[], magma_int_t ldda,
magmaFloatComplex_ptr dBarray[], magma_int_t lddb,
magma_int_t batchCount, magma_queue_t queue )
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
else if ( batchCount < 0 )
info = -8;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 || n == 0 || batchCount == 0 ) {
return;
}
dim3 threads( BLK_X, 1, 1 );
dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ), batchCount );
if ( uplo == MagmaLower ) {
hipLaunchKernelGGL(( clacpy_lower_kernel_batched)
, dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, dAarray, ldda, dBarray, lddb );
}
else if ( uplo == MagmaUpper ) {
hipLaunchKernelGGL(( clacpy_upper_kernel_batched)
, dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, dAarray, ldda, dBarray, lddb );
}
else {
hipLaunchKernelGGL(( clacpy_full_kernel_batched)
, dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, dAarray, ldda, dBarray, lddb );
}
}
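// Note on the batched launches above: the batch index rides in grid.z and
// is read back as blockIdx.z inside the *_batched kernels, so one launch
// covers all batchCount matrices. Since (per the comment at the top of this
// file) each grid dimension is capped at 64K-1 on the targeted
// architectures, batchCount per launch is bounded by 65535 under this
// scheme.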
|
329d1c744f9acc528767e5bfc969875f3847b4f9.cu
|
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@author Mark Gates
@author Azzam Haidar
@generated from magmablas/zlacpy.cu normal z -> c, Tue Feb 9 16:05:28 2016
*/
#include "magma_internal.h"
// To deal with really large matrices, this launches multiple super blocks,
// each with up to 64K-1 x 64K-1 thread blocks, which is up to a 4194240 x 4194240 matrix with BLK=64.
// CUDA architecture 2.0 limits each grid dimension to 64K-1.
// Instances arose for vectors used by sparse matrices with M > 4194240, though N is small.
const magma_int_t max_blocks = 65535;
// BLK_X and BLK_Y need to be equal for claset_q to deal with diag & offdiag
// when looping over super blocks.
// Formerly, BLK_X and BLK_Y could be different.
#define BLK_X 64
#define BLK_Y BLK_X
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to claset, clacpy, clag2z, clag2z, cgeadd.
*/
static __device__
void clacpy_full_device(
int m, int n,
const magmaFloatComplex *dA, int ldda,
magmaFloatComplex *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column */
bool full = (iby + BLK_Y <= n);
/* do only rows inside matrix */
if ( ind < m ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
else {
// partial block-column
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
}
}
/*
Similar to clacpy_full, but updates only the diagonal and below.
Blocks that are fully above the diagonal exit immediately.
Code similar to claset, clacpy, zlat2c, clat2z.
*/
static __device__
void clacpy_lower_device(
int m, int n,
const magmaFloatComplex *dA, int ldda,
magmaFloatComplex *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (below diag) */
bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y));
/* do only rows inside matrix, and blocks not above diag */
if ( ind < m && ind + BLK_X > iby ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n && ind >= iby+j; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
}
}
/*
Similar to clacpy_full, but updates only the diagonal and above.
Blocks that are fully below the diagonal exit immediately.
Code similar to claset, clacpy, zlat2c, clat2z.
*/
static __device__
void clacpy_upper_device(
int m, int n,
const magmaFloatComplex *dA, int ldda,
magmaFloatComplex *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (above diag) */
bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby));
/* do only rows inside matrix, and blocks not below diag */
if ( ind < m && ind < iby + BLK_Y ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( ind <= iby+j ) {
dB[j*lddb] = dA[j*ldda];
}
}
}
}
}
//////////////////////////////////////////////////////////////////////////////////////
/*
kernel wrappers to call the device functions.
*/
__global__
void clacpy_full_kernel(
int m, int n,
const magmaFloatComplex *dA, int ldda,
magmaFloatComplex *dB, int lddb )
{
clacpy_full_device(m, n, dA, ldda, dB, lddb);
}
__global__
void clacpy_lower_kernel(
int m, int n,
const magmaFloatComplex *dA, int ldda,
magmaFloatComplex *dB, int lddb )
{
clacpy_lower_device(m, n, dA, ldda, dB, lddb);
}
__global__
void clacpy_upper_kernel(
int m, int n,
const magmaFloatComplex *dA, int ldda,
magmaFloatComplex *dB, int lddb )
{
clacpy_upper_device(m, n, dA, ldda, dB, lddb);
}
//////////////////////////////////////////////////////////////////////////////////////
/*
kernel wrappers to call the device functions for the batched routine.
*/
__global__
void clacpy_full_kernel_batched(
int m, int n,
magmaFloatComplex const * const *dAarray, int ldda,
magmaFloatComplex **dBarray, int lddb )
{
int batchid = blockIdx.z;
clacpy_full_device(m, n, dAarray[batchid], ldda, dBarray[batchid], lddb);
}
__global__
void clacpy_lower_kernel_batched(
int m, int n,
magmaFloatComplex const * const *dAarray, int ldda,
magmaFloatComplex **dBarray, int lddb )
{
int batchid = blockIdx.z;
clacpy_lower_device(m, n, dAarray[batchid], ldda, dBarray[batchid], lddb);
}
__global__
void clacpy_upper_kernel_batched(
int m, int n,
magmaFloatComplex const * const *dAarray, int ldda,
magmaFloatComplex **dBarray, int lddb )
{
int batchid = blockIdx.z;
clacpy_upper_device(m, n, dAarray[batchid], ldda, dBarray[batchid], lddb);
}
//////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
CLACPY copies all or part of a two-dimensional matrix dA to another
matrix dB.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA to be copied to dB.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
- = MagmaFull: All of the matrix dA
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
dA COMPLEX array, dimension (LDDA,N)
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[out]
dB COMPLEX array, dimension (LDDB,N)
The M-by-N matrix dB.
On exit, dB = dA in the locations specified by UPLO.
@param[in]
lddb INTEGER
The leading dimension of the array dB. LDDB >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_caux2
********************************************************************/
extern "C" void
magmablas_clacpy_q(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
magmaFloatComplex_const_ptr dA, magma_int_t ldda,
magmaFloatComplex_ptr dB, magma_int_t lddb,
magma_queue_t queue )
{
#define dA(i_, j_) (dA + (i_) + (j_)*ldda)
#define dB(i_, j_) (dB + (i_) + (j_)*lddb)
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( m == 0 || n == 0 ) {
return;
}
assert( BLK_X == BLK_Y );
const magma_int_t super_NB = max_blocks*BLK_X;
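    // The copy is tiled into super_NB x super_NB super-blocks, likely so each
    // kernel launch keeps its grid within CUDA's per-dimension block limits;
    // diagonal super-blocks use the triangular kernel, off-diagonal ones the
    // plain full-copy kernel.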
dim3 super_grid( magma_ceildiv( m, super_NB ), magma_ceildiv( n, super_NB ) );
dim3 threads( BLK_X, 1 );
dim3 grid;
magma_int_t mm, nn;
if ( uplo == MagmaLower ) {
for( unsigned int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( unsigned int j=0; j < super_grid.y && j <= i; ++j ) { // from left to diagonal
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
if ( i == j ) { // diagonal super block
clacpy_lower_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
( mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb );
}
else { // off diagonal super block
clacpy_full_kernel <<< grid, threads, 0, queue->cuda_stream() >>>
( mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb );
}
}
}
}
else if ( uplo == MagmaUpper ) {
for( unsigned int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( unsigned int j=i; j < super_grid.y; ++j ) { // from diagonal to right
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
if ( i == j ) { // diagonal super block
clacpy_upper_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
( mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb );
}
else { // off diagonal super block
clacpy_full_kernel <<< grid, threads, 0, queue->cuda_stream() >>>
( mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb );
}
}
}
}
else {
// TODO: use cudaMemcpy or cudaMemcpy2D ?
for( unsigned int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( unsigned int j=0; j < super_grid.y; ++j ) { // full row
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
clacpy_full_kernel <<< grid, threads, 0, queue->cuda_stream() >>>
( mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb );
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
CLACPY_BATCHED copies all or part of each two-dimensional matrix
dAarray[i] to matrix dBarray[i], for 0 <= i < batchcount.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of each matrix dA to be copied to dB.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
Otherwise: All of each matrix dA
@param[in]
m INTEGER
The number of rows of each matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of each matrix dA. N >= 0.
@param[in]
dAarray COMPLEX* array, dimension (batchCount)
Array of pointers to the matrices dA, where each dA is of dimension (LDDA,N).
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
@param[in]
ldda INTEGER
The leading dimension of each array dA. LDDA >= max(1,M).
@param[out]
dBarray COMPLEX* array, dimension (batchCount)
Array of pointers to the matrices dB, where each dB is of dimension (LDDB,N).
The M-by-N matrix dB.
On exit, dB = dA in the locations specified by UPLO.
@param[in]
lddb INTEGER
The leading dimension of each array dB. LDDB >= max(1,M).
@param[in]
batchCount INTEGER
The number of matrices in dAarray and dBarray. batchCount >= 0.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_caux2
********************************************************************/
extern "C" void
magmablas_clacpy_batched(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
magmaFloatComplex_const_ptr const dAarray[], magma_int_t ldda,
magmaFloatComplex_ptr dBarray[], magma_int_t lddb,
magma_int_t batchCount, magma_queue_t queue )
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
else if ( batchCount < 0 )
info = -8;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 || n == 0 || batchCount == 0 ) {
return;
}
dim3 threads( BLK_X, 1, 1 );
dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ), batchCount );
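    // One z-slice of thread blocks per matrix: blockIdx.z selects the batch
    // entry inside the *_kernel_batched wrappers above.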
if ( uplo == MagmaLower ) {
clacpy_lower_kernel_batched
<<< grid, threads, 0, queue->cuda_stream() >>>
( m, n, dAarray, ldda, dBarray, lddb );
}
else if ( uplo == MagmaUpper ) {
clacpy_upper_kernel_batched
<<< grid, threads, 0, queue->cuda_stream() >>>
( m, n, dAarray, ldda, dBarray, lddb );
}
else {
clacpy_full_kernel_batched
<<< grid, threads, 0, queue->cuda_stream() >>>
( m, n, dAarray, ldda, dBarray, lddb );
}
}
|
0685b79a41cff25e6f4499cf350f3f2dd9e60285.hip
|
// !!! This is a file automatically generated by hipify!!!
// Hybrid MPI+OpenMP+CUDA computation of Pi
#include <stdio.h>
#include <mpi.h>
#include <omp.h>
#include <hip/hip_runtime.h>
#define NBIN 10000000 // Number of bins
#define NUM_DEVICE 2 // # of GPU devices = # of OpenMP threads
#define NUM_BLOCK 13 // Number of thread blocks
#define NUM_THREAD 192 // Number of threads per block
// Kernel that executes on the CUDA device
__global__ void cal_pi(float *sum,int nbin,float step,float offset,int nthreads,int nblocks) {
int i;
float x;
int idx = blockIdx.x*blockDim.x+threadIdx.x; // Sequential thread index across the blocks
for (i=idx; i<nbin; i+=nthreads*nblocks) { // Interleaved bin assignment to threads
x = offset+(i+0.5)*step;
sum[idx] += 4.0/(1.0+x*x);
}
}
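/* The kernel applies the midpoint rule to pi = integral_0^1 4/(1+x^2) dx:
   each thread accumulates 4/(1+x_i^2) with x_i = offset + (i+0.5)*step over
   its interleaved subset of bins; the host then reduces over CUDA threads,
   scales by step, and combines across OpenMP threads and MPI ranks. */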
int main(int argc,char **argv) {
int myid,nproc,nbin,tid;
int mpid;
float step,offset,pi=0.0,pig;
dim3 dimGrid(NUM_BLOCK,1,1); // Grid dimensions (only use 1D)
dim3 dimBlock(NUM_THREAD,1,1); // Block dimensions (only use 1D)
float *sumHost,*sumDev; // Pointers to host & device arrays
int dev_used;
MPI_Init(&argc,&argv);
MPI_Comm_rank(MPI_COMM_WORLD,&myid); // My MPI rank
MPI_Comm_size(MPI_COMM_WORLD,&nproc); // Number of MPI processes
//nbin = NBIN/nproc; // Number of bins per MPI process
//step = 1.0/(float)(nbin*nproc); // Step size with redefined number of bins
//offset = myid*step*nbin; // Quadrature-point offset
omp_set_num_threads(NUM_DEVICE); // One OpenMP thread per GPU device
nbin = NBIN/(nproc*NUM_DEVICE); // # of bins per OpenMP thread
step = 1.0/(float)(nbin*nproc*NUM_DEVICE);
#pragma omp parallel private(mpid,offset,sumHost,sumDev,tid,dev_used) reduction(+:pi)
{
mpid = omp_get_thread_num();
offset = (NUM_DEVICE*myid+mpid)*step*nbin; // Quadrature-point offset
hipSetDevice(mpid%2);
//hipSetDevice(myid%2);
size_t size = NUM_BLOCK*NUM_THREAD*sizeof(float); //Array memory size
sumHost = (float *)malloc(size); // Allocate array on host
hipMalloc((void **) &sumDev,size); // Allocate array on device
hipMemset(sumDev,0,size); // Reset array in device to 0
// Calculate on device (call CUDA kernel)
	hipLaunchKernelGGL((cal_pi), dim3(dimGrid), dim3(dimBlock), 0, 0, sumDev,nbin,step,offset,NUM_THREAD,NUM_BLOCK);
// Retrieve result from device and store it in host array
hipMemcpy(sumHost,sumDev,size,hipMemcpyDeviceToHost);
// Reduction over CUDA threads
for(tid=0; tid<NUM_THREAD*NUM_BLOCK; tid++)
pi += sumHost[tid];
pi *= step;
// CUDA cleanup
free(sumHost);
hipFree(sumDev);
hipGetDevice(&dev_used);
//printf("myid = %d: device used = %d; partial pi = %f\n",myid,dev_used,pi);
printf("myid = %d; mpid = %d: device used = %d; partial pi = %f\n", myid, mpid, dev_used, pi);
} // End omp parallel
// Reduction over MPI processes
MPI_Allreduce(&pi,&pig,1,MPI_FLOAT,MPI_SUM,MPI_COMM_WORLD);
if (myid==0) printf("PI = %f\n",pig);
MPI_Finalize();
return 0;
}
|
0685b79a41cff25e6f4499cf350f3f2dd9e60285.cu
|
// Hybrid MPI+OpenMP+CUDA computation of Pi
#include <stdio.h>
#include <mpi.h>
#include <omp.h>
#include <cuda.h>
#define NBIN 10000000 // Number of bins
#define NUM_DEVICE 2 // # of GPU devices = # of OpenMP threads
#define NUM_BLOCK 13 // Number of thread blocks
#define NUM_THREAD 192 // Number of threads per block
// Kernel that executes on the CUDA device
__global__ void cal_pi(float *sum,int nbin,float step,float offset,int nthreads,int nblocks) {
int i;
float x;
int idx = blockIdx.x*blockDim.x+threadIdx.x; // Sequential thread index across the blocks
for (i=idx; i<nbin; i+=nthreads*nblocks) { // Interleaved bin assignment to threads
x = offset+(i+0.5)*step;
sum[idx] += 4.0/(1.0+x*x);
}
}
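/* The kernel applies the midpoint rule to pi = integral_0^1 4/(1+x^2) dx:
   each thread accumulates 4/(1+x_i^2) with x_i = offset + (i+0.5)*step over
   its interleaved subset of bins; the host then reduces over CUDA threads,
   scales by step, and combines across OpenMP threads and MPI ranks. */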
int main(int argc,char **argv) {
int myid,nproc,nbin,tid;
int mpid;
float step,offset,pi=0.0,pig;
dim3 dimGrid(NUM_BLOCK,1,1); // Grid dimensions (only use 1D)
dim3 dimBlock(NUM_THREAD,1,1); // Block dimensions (only use 1D)
float *sumHost,*sumDev; // Pointers to host & device arrays
int dev_used;
MPI_Init(&argc,&argv);
MPI_Comm_rank(MPI_COMM_WORLD,&myid); // My MPI rank
MPI_Comm_size(MPI_COMM_WORLD,&nproc); // Number of MPI processes
//nbin = NBIN/nproc; // Number of bins per MPI process
//step = 1.0/(float)(nbin*nproc); // Step size with redefined number of bins
//offset = myid*step*nbin; // Quadrature-point offset
omp_set_num_threads(NUM_DEVICE); // One OpenMP thread per GPU device
nbin = NBIN/(nproc*NUM_DEVICE); // # of bins per OpenMP thread
step = 1.0/(float)(nbin*nproc*NUM_DEVICE);
#pragma omp parallel private(mpid,offset,sumHost,sumDev,tid,dev_used) reduction(+:pi)
{
mpid = omp_get_thread_num();
offset = (NUM_DEVICE*myid+mpid)*step*nbin; // Quadrature-point offset
cudaSetDevice(mpid%2);
//cudaSetDevice(myid%2);
size_t size = NUM_BLOCK*NUM_THREAD*sizeof(float); //Array memory size
sumHost = (float *)malloc(size); // Allocate array on host
cudaMalloc((void **) &sumDev,size); // Allocate array on device
cudaMemset(sumDev,0,size); // Reset array in device to 0
// Calculate on device (call CUDA kernel)
cal_pi <<<dimGrid,dimBlock>>> (sumDev,nbin,step,offset,NUM_THREAD,NUM_BLOCK);
// Retrieve result from device and store it in host array
cudaMemcpy(sumHost,sumDev,size,cudaMemcpyDeviceToHost);
// Reduction over CUDA threads
for(tid=0; tid<NUM_THREAD*NUM_BLOCK; tid++)
pi += sumHost[tid];
pi *= step;
// CUDA cleanup
free(sumHost);
cudaFree(sumDev);
cudaGetDevice(&dev_used);
//printf("myid = %d: device used = %d; partial pi = %f\n",myid,dev_used,pi);
printf("myid = %d; mpid = %d: device used = %d; partial pi = %f\n", myid, mpid, dev_used, pi);
} // End omp parallel
// Reduction over MPI processes
MPI_Allreduce(&pi,&pig,1,MPI_FLOAT,MPI_SUM,MPI_COMM_WORLD);
if (myid==0) printf("PI = %f\n",pig);
MPI_Finalize();
return 0;
}
|
e3bd13d5b5c0abd94612c2f7a5d3f152f60e3b7f.hip
|
// !!! This is a file automatically generated by hipify!!!
/* ==========================================================================
textureCube.cu
==========================================================================
Main wrapper + kernel that changes the colors of the cube-map faces
*/
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "utils.h"
#define PI 3.1415926536f
// --------------------------------------------------------------------------
// Kernel
// --------------------------------------------------------------------------
// Paint a 2D surface with a moving bulls-eye pattern. The "face" parameter selects
// between 6 different colors to use. We will use a different color on each face of a
// cube map.
__global__ void CudaKernelTextureCubeStrobelight(char *surface, int width, int height, size_t pitch, int face, float t)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
unsigned char *pixel;
// in the case where, due to quantization into grids, we have
// more threads than pixels, skip the threads which don't
// correspond to valid pixels
if (x >= width || y >= height) return;
// get a pointer to this pixel
pixel = (unsigned char *)(surface + y*pitch) + 4 * x;
// populate it
float theta_x = (2.0f*x) / width - 1.0f;
float theta_y = (2.0f*y) / height - 1.0f;
float theta = 2.0f*PI*sqrt(theta_x*theta_x + theta_y*theta_y);
unsigned char value = 255 * (0.6f + 0.4f*cos(theta + t));
pixel[3] = 255; // alpha
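	// Note: the 0.5 literals below truncate to 0 when stored into the
	// unsigned char channels, so those channels render black; kept as in
	// the original sample.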
if (face % 2)
{
pixel[0] = // blue
pixel[1] = // green
pixel[2] = 0.5; // red
pixel[face / 2] = value;
}
else
{
pixel[0] = // blue
pixel[1] = // green
pixel[2] = value; // red
pixel[face / 2] = 0.5;
}
}
// --------------------------------------------------------------------------
// Wrapper
// --------------------------------------------------------------------------
// Sets up grid / blocks, launches kernel
extern "C"
void CudaWrapperTextureCubeStrobelight(void *surface, int width, int height, size_t pitch, int face, float t)
{
hipError_t error = hipSuccess;
dim3 Db = dim3(16, 16); // block dimensions are fixed to be 256 threads
dim3 Dg = dim3((width + Db.x - 1) / Db.x, (height + Db.y - 1) / Db.y);
	hipLaunchKernelGGL((CudaKernelTextureCubeStrobelight), dim3(Dg), dim3(Db), 0, 0, (char *)surface, width, height, pitch, face, t);
ProcessCudaError("cuda_kernel_texture_cube() failed to launch error: ");
}
|
e3bd13d5b5c0abd94612c2f7a5d3f152f60e3b7f.cu
|
/* ==========================================================================
textureCube.cu
==========================================================================
Main wrapper + kernel that changes the colors of the cube-map faces
*/
#include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "utils.h"
#define PI 3.1415926536f
// --------------------------------------------------------------------------
// Kernel
// --------------------------------------------------------------------------
// Paint a 2D surface with a moving bulls-eye pattern. The "face" parameter selects
// between 6 different colors to use. We will use a different color on each face of a
// cube map.
__global__ void CudaKernelTextureCubeStrobelight(char *surface, int width, int height, size_t pitch, int face, float t)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
unsigned char *pixel;
// in the case where, due to quantization into grids, we have
// more threads than pixels, skip the threads which don't
// correspond to valid pixels
if (x >= width || y >= height) return;
// get a pointer to this pixel
pixel = (unsigned char *)(surface + y*pitch) + 4 * x;
// populate it
float theta_x = (2.0f*x) / width - 1.0f;
float theta_y = (2.0f*y) / height - 1.0f;
float theta = 2.0f*PI*sqrt(theta_x*theta_x + theta_y*theta_y);
unsigned char value = 255 * (0.6f + 0.4f*cos(theta + t));
pixel[3] = 255; // alpha
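	// Note: the 0.5 literals below truncate to 0 when stored into the
	// unsigned char channels, so those channels render black; kept as in
	// the original sample.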
if (face % 2)
{
pixel[0] = // blue
pixel[1] = // green
pixel[2] = 0.5; // red
pixel[face / 2] = value;
}
else
{
pixel[0] = // blue
pixel[1] = // green
pixel[2] = value; // red
pixel[face / 2] = 0.5;
}
}
// --------------------------------------------------------------------------
// Wrapper
// --------------------------------------------------------------------------
// Sets up grid / blocks, launches kernel
extern "C"
void CudaWrapperTextureCubeStrobelight(void *surface, int width, int height, size_t pitch, int face, float t)
{
cudaError_t error = cudaSuccess;
dim3 Db = dim3(16, 16); // block dimensions are fixed to be 256 threads
dim3 Dg = dim3((width + Db.x - 1) / Db.x, (height + Db.y - 1) / Db.y);
CudaKernelTextureCubeStrobelight <<<Dg, Db >>>((char *)surface, width, height, pitch, face, t);
ProcessCudaError("cuda_kernel_texture_cube() failed to launch error: ");
}
|
6fe44b0bee3b60c42ea7a03620c11e045e674351.hip
|
// !!! This is a file automatically generated by hipify!!!
//#define GPU_ANALYSIS_DEBUG
#include <vector>
#include <stdio.h>
#include <stdlib.h>
#include "headers/params.h"
#include "headers/device_BC_plan.h"
#include "headers/device_peak_find.h"
#include "headers/device_MSD_BLN_grid.h"
#include "headers/device_MSD_BLN_pw.h"
//#include "headers/device_MSD_BLN_pw_dp.h"
#include "headers/device_MSD_limited.h"
#include "headers/device_SPS_long.h"
#include "headers/device_threshold.h"
#include "headers/device_single_FIR.h"
#include "timer.h"
//---------------------------------------------------------------------------------
//-------> Kahan MSD
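// Compensated (Kahan) summation: sum_error captures the low-order bits lost
// when b = sum + a rounds, via sum_error = (b - sum) - a, and is subtracted
// from the next input so precision is preserved over nDMs*nTimesamples terms.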
void d_kahan_summation(float *signal, int nDMs, int nTimesamples, int offset, float *result, float *error){
double sum;
double sum_error;
double a,b;
sum=0;
sum_error=0;
for(int d=0;d<nDMs; d++){
for(int s=0; s<(nTimesamples-offset); s++){
a=signal[(size_t) (d*nTimesamples + s)]-sum_error;
b=sum+a;
sum_error=(b-sum);
sum_error=sum_error-a;
sum=b;
}
}
*result=sum;
*error=sum_error;
}
void d_kahan_sd(float *signal, int nDMs, int nTimesamples, int offset, double mean, float *result, float *error){
double sum;
double sum_error;
double a,b,dtemp;
sum=0;
sum_error=0;
for(int d=0;d<nDMs; d++){
for(int s=0; s<(nTimesamples-offset); s++){
dtemp=(signal[(size_t) (d*nTimesamples + s)]-sum_error - mean);
a=dtemp*dtemp;
b=sum+a;
sum_error=(b-sum);
sum_error=sum_error-a;
sum=b;
}
}
*result=sum;
*error=sum_error;
}
void MSD_Kahan(float *h_input, int nDMs, int nTimesamples, int offset, double *mean, double *sd){
float error, signal_mean, signal_sd;
int nElements=nDMs*(nTimesamples-offset);
d_kahan_summation(h_input, nDMs, nTimesamples, offset, &signal_mean, &error);
signal_mean=signal_mean/nElements;
d_kahan_sd(h_input, nDMs, nTimesamples, offset, signal_mean, &signal_sd, &error);
signal_sd=sqrt(signal_sd/nElements);
*mean=signal_mean;
*sd=signal_sd;
}
void MSD_on_GPU(float *h_input, float *d_input, float *d_MSD, float *signal_mean, float *signal_sd, float *signal_mean_bln, float *signal_sd_bln, float *signal_mean_bl_bln, float *signal_sd_bl_bln, int nDMs, int nTimesamples, int offset, float sigma_constant, float *MSD_limited_time, float *MSD_BLN_pw_time, float *MSD_BLN_grid_time){
GpuTimer timer;
float h_MSD[3];
hipMemcpy( d_input, h_input, ((size_t) nDMs*nTimesamples)*sizeof(float), hipMemcpyHostToDevice);
timer.Start();
MSD_limited(d_input, d_MSD, nDMs, nTimesamples, offset);
timer.Stop();
(*MSD_limited_time) = timer.Elapsed();
hipMemcpy(h_MSD, d_MSD, 3*sizeof(float), hipMemcpyDeviceToHost);
(*signal_mean) = h_MSD[0];
(*signal_sd) = h_MSD[1];
timer.Start();
MSD_BLN_pw(d_input, d_MSD, nDMs, nTimesamples, offset, sigma_constant);
timer.Stop();
(*MSD_BLN_pw_time) = timer.Elapsed();
hipMemcpy(h_MSD, d_MSD, 3*sizeof(float), hipMemcpyDeviceToHost);
(*signal_mean_bln) = h_MSD[0];
(*signal_sd_bln) = h_MSD[1];
timer.Start();
MSD_BLN_grid(d_input, d_MSD, 32, 32, nDMs, nTimesamples, offset, sigma_constant);
timer.Stop();
(*MSD_BLN_grid_time) = timer.Elapsed();
hipMemcpy(h_MSD, d_MSD, 3*sizeof(float), hipMemcpyDeviceToHost);
(*signal_mean_bl_bln) = h_MSD[0];
(*signal_sd_bl_bln) = h_MSD[1];
}
void MSD_on_GPU_LA(float *h_input, float *d_input, float *d_MSD, float *h_MSD_LA, float *h_MSD_BLN_LA, int nDMs, int nTimesamples, int offset, float sigma_constant){
hipMemcpy( d_input, h_input, ((size_t) nDMs*nTimesamples)*sizeof(float), hipMemcpyHostToDevice);
MSD_linear_approximation(d_input, d_MSD, 32, nDMs, nTimesamples, offset);
hipMemcpy(h_MSD_LA, d_MSD, 3*sizeof(float), hipMemcpyDeviceToHost);
MSD_BLN_LA_pw_normal(d_input, d_MSD, 32, nDMs, nTimesamples, offset, sigma_constant);
hipMemcpy(h_MSD_BLN_LA, d_MSD, 3*sizeof(float), hipMemcpyDeviceToHost);
}
void MSD_on_GPU_halfed(float *h_input, float *d_input, float *d_MSD, float *signal_mean, float *signal_sd, float *signal_mean_bln, float *signal_sd_bln, int nDMs, int nTimesamples, int offset, float sigma_constant){
float h_MSD[3];
float *h_temp;
int dt=nTimesamples/2;
h_temp = new float[nDMs*dt];
for(int d=0; d<nDMs; d++){
for(int s=0; s<dt; s++){
h_temp[d*dt + s] = h_input[d*nTimesamples + 2*s];
}
}
hipMemcpy( d_input, h_temp, ((size_t) nDMs*dt)*sizeof(float), hipMemcpyHostToDevice);
MSD_limited(d_input, d_MSD, nDMs, dt, offset/2);
hipMemcpy(h_MSD, d_MSD, 3*sizeof(float), hipMemcpyDeviceToHost);
(*signal_mean) = h_MSD[0];
(*signal_sd) = h_MSD[1];
MSD_BLN_pw(d_input, d_MSD, nDMs, dt, offset/2, sigma_constant);
hipMemcpy(h_MSD, d_MSD, 3*sizeof(float), hipMemcpyDeviceToHost);
(*signal_mean_bln) = h_MSD[0];
(*signal_sd_bln) = h_MSD[1];
delete [] h_temp;
}
//-------> Kahan MSD
//---------------------------------------------------------------------------------
void Calculate_FIR(float *input, float *output, int nTaps, int nDMs, int nTimesamples, int ut) {
int d,s,t;
float ftemp;
for(d=0; d<nDMs; d++){
for(s=0; s<nTimesamples-ut; s++){
ftemp=0;
for(t=0; t<nTaps; t++){
ftemp+=input[d*nTimesamples + s + t];
}
output[d*nTimesamples + s]=ftemp;
}
}
}
void Decimate_in_time(float *h_input, float *h_CPU_decimate, int DIT_value, int DIT_factor, int nDMs, int nTimesamples, int offset){
float ftemp;
int decimated_timesamples;
decimated_timesamples=nTimesamples/(DIT_value*DIT_factor);
for(int d=0; d<nDMs; d++){
for(int s=0; s<decimated_timesamples; s++){
ftemp=0;
for(int t=0; t<DIT_factor; t++){
ftemp = ftemp + h_input[d*decimated_timesamples*DIT_factor + s*DIT_factor + t];
}
h_CPU_decimate[d*decimated_timesamples + s]=ftemp;
}
}
}
void Export_data(float *input, size_t nDMs, size_t nTimesamples, char *filename){
FILE *fp_out;
char mod_filename[200];
sprintf(mod_filename,"%s.dat",filename);
	if (( fp_out = fopen(mod_filename, "wb") ) == NULL) {
fprintf(stderr, "Error opening output file!\n");
exit(0);
}
fwrite(input, (nDMs*nTimesamples)*sizeof(float), 4, fp_out);
fclose(fp_out);
for(int d=0; d<nDMs; d++){
sprintf(mod_filename,"%s_dm%d.dat",filename,d);
		if (( fp_out = fopen(mod_filename, "wb") ) == NULL) {
fprintf(stderr, "Error opening output file!\n");
exit(0);
}
fwrite(&input[d*nTimesamples], nTimesamples*sizeof(float), 4, fp_out);
fclose(fp_out);
}
}
void export_file_nDM_nTimesamples(float *data, int nDMs, int nTimesamples, char *filename) {
FILE *file_out;
char str[200];
sprintf(str, "%s_DM.dat", filename);
if (( file_out = fopen(str, "w") ) == NULL) {
fprintf(stderr, "Error opening output file!\n");
exit(0);
}
printf("export nDMs\n");
for (int s = 0; s < nTimesamples; s++) {
for (int d = 0; d < nDMs; d++) {
fprintf(file_out, "%f ", data[d*nTimesamples + s]);
}
fprintf(file_out, "\n");
}
fclose(file_out);
sprintf(str, "%s_Time.dat", filename);
if (( file_out = fopen(str, "w") ) == NULL) {
fprintf(stderr, "Error opening output file!\n");
exit(0);
}
printf("export nTimesamples\n");
for (int d = 0; d < nDMs; d++) {
for (int s = 0; s < nTimesamples; s++) {
fprintf(file_out, "%f ", data[d*nTimesamples + s]);
}
fprintf(file_out, "\n");
}
fclose(file_out);
}
void Create_PD_plan(std::vector<PulseDetection_plan> *PD_plan, std::vector<int> *BC_widths, int nDMs, int nTimesamples){
int Elements_per_block, itemp, nRest;
PulseDetection_plan PDmp;
if(BC_widths->size()>0){
PDmp.shift = 0;
PDmp.output_shift = 0;
PDmp.startTaps = 0;
PDmp.iteration = 0;
PDmp.decimated_timesamples = nTimesamples;
PDmp.dtm = (nTimesamples>>(PDmp.iteration+1));
PDmp.dtm = PDmp.dtm - (PDmp.dtm&1);
PDmp.nBoxcars = BC_widths->operator[](0);
Elements_per_block = PD_NTHREADS*2 - PDmp.nBoxcars;
itemp = PDmp.decimated_timesamples;
PDmp.nBlocks = itemp/Elements_per_block;
nRest = itemp - PDmp.nBlocks*Elements_per_block;
if(nRest>0) PDmp.nBlocks++;
PDmp.unprocessed_samples = PDmp.nBoxcars + 6;
if(PDmp.decimated_timesamples<PDmp.unprocessed_samples) PDmp.nBlocks=0;
PDmp.total_ut = PDmp.unprocessed_samples;
PD_plan->push_back(PDmp);
for(int f=1; f< (int) BC_widths->size(); f++){
// These are based on previous values of PDmp
PDmp.shift = PDmp.nBoxcars/2;
PDmp.output_shift = PDmp.output_shift + PDmp.decimated_timesamples;
PDmp.startTaps = PDmp.startTaps + PDmp.nBoxcars*(1<<PDmp.iteration);
PDmp.iteration = PDmp.iteration + 1;
// Definition of new PDmp values
PDmp.decimated_timesamples = PDmp.dtm;
PDmp.dtm = (nTimesamples>>(PDmp.iteration+1));
PDmp.dtm = PDmp.dtm - (PDmp.dtm&1);
PDmp.nBoxcars = BC_widths->operator[](f);
Elements_per_block=PD_NTHREADS*2 - PDmp.nBoxcars;
itemp = PDmp.decimated_timesamples;
PDmp.nBlocks = itemp/Elements_per_block;
nRest = itemp - PDmp.nBlocks*Elements_per_block;
if(nRest>0) PDmp.nBlocks++;
			PDmp.unprocessed_samples = PDmp.unprocessed_samples/2 + PDmp.nBoxcars + 6;
if(PDmp.decimated_timesamples<PDmp.unprocessed_samples) PDmp.nBlocks=0;
PDmp.total_ut = PDmp.unprocessed_samples*(1<<PDmp.iteration);
PD_plan->push_back(PDmp);
}
}
}
int Get_max_iteration(int max_boxcar_width, std::vector<int> *BC_widths, int *max_width_performed){
int startTaps, iteration;
startTaps = 0;
iteration = 0;
for(int f=0; f<(int) BC_widths->size(); f++){
startTaps = startTaps + BC_widths->operator[](f)*(1<<f);
if(startTaps>=max_boxcar_width) {
iteration = f+1;
break;
}
}
if(max_boxcar_width>startTaps) {
iteration=(int) BC_widths->size();
}
*max_width_performed=startTaps;
return(iteration);
}
void analysis_GPU(float *h_peak_list, size_t *peak_pos, size_t max_peak_size, int i, float tstart, int t_processed, int inBin, int outBin, int *maxshift, int max_ndms, int *ndms, float cutoff, float sigma_constant, float max_boxcar_width_in_sec, float *output_buffer, float *dm_low, float *dm_high, float *dm_step, float tsamp, int candidate_algorithm, int enable_sps_baselinenoise){
int max_boxcar_width = (int) (max_boxcar_width_in_sec/tsamp);
int max_width_performed=0;
//unsigned long int j;
unsigned long int vals;
int nTimesamples = t_processed;
int nDMs = ndms[i];
int temp_peak_pos;
//double total;
// Calculate the total number of values
vals = (unsigned long int) ( nDMs*nTimesamples );
double total_time, partial_time;
//float max, min, threshold;
int max_iteration;
int t_BC_widths[10]={PD_MAXTAPS,16,16,16,8,8,8,8,8,8};
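	// Boxcar counts per decimation iteration: PD_MAXTAPS boxcars at full time
	// resolution, then 16 and 8 at successively halved resolutions; the effective
	// widths accumulate as startTaps += nBoxcars*2^iteration (see Get_max_iteration).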
std::vector<int> BC_widths(t_BC_widths,t_BC_widths+sizeof(t_BC_widths)/sizeof(int));
std::vector<PulseDetection_plan> PD_plan;
//---------------------------------------------------------------------------
//----------> GPU part
printf("\n----------> GPU analysis part\n");
printf(" Dimensions nDMs:%d; nTimesamples:%d; inBin:%d; outBin:%d; maxshift:%d; \n", ndms[i], t_processed, inBin, outBin, *maxshift);
GpuTimer timer;
//float h_MSD[3];
float *d_MSD;
checkCudaErrors(hipGetLastError());
if ( hipSuccess != hipMalloc((void**) &d_MSD, sizeof(float)*3)) {printf("Allocation error!\n"); exit(201);}
total_time = 0;
/*
//-------------- CPU check
float *h_temp;
double signal_mean, signal_sd;
h_temp = (float *)malloc( ((size_t) nDMs*nTimesamples)*sizeof(float));
memset(h_temp, 0.0, ((size_t) nDMs*nTimesamples)*sizeof(float));
hipMemcpy( h_temp, output_buffer, ((size_t) nDMs*nTimesamples)*sizeof(float), hipMemcpyDeviceToHost);
MSD_Kahan(h_temp, nDMs, nTimesamples, 0, &signal_mean, &signal_sd);
printf("MSD_kahan: after 1 tap Mean: %e, Standard deviation: %e;\n",signal_mean, signal_sd);
//-------------- CPU check
*/
/*
//-------------- One Call linear approximation
timer.Start();
MSD_linear_approximation(output_buffer, d_MSD, PD_MAXTAPS, nDMs, nTimesamples, 0);
timer.Stop();
partial_time = timer.Elapsed();
total_time += partial_time;
hipMemcpy(h_MSD, d_MSD, 3*sizeof(float), hipMemcpyDeviceToHost);
printf(" MSD linear approximation: Mean: %f, Stddev: %f, modifier: %f\n", h_MSD[0], h_MSD[1], h_MSD[2]);
#ifdef GPU_ANALYSIS_DEBUG
printf(" One kernel took:%f ms\n", partial_time);
#endif
//-------------- One Call linear approximation
*/
/*
//-------------- Base level noise point-wise
timer.Start();
MSD_BLN_pw(output_buffer, d_MSD, nDMs, nTimesamples, 0, sigma_constant);
timer.Stop();
partial_time = timer.Elapsed();
total_time += partial_time;
hipMemcpy(h_MSD, d_MSD, 3*sizeof(float), hipMemcpyDeviceToHost);
printf(" MSD BLN point-wise: Mean: %f, Stddev: %f, modifier: %f\n", h_MSD[0], h_MSD[1], h_MSD[2]);
#ifdef GPU_ANALYSIS_DEBUG
printf(" MSD BLN point-wise kernel took:%f ms\n", partial_time);
#endif
//-------------- Base level noise point-wise
*/
/*
//-------------- BLN_LA
timer.Start();
MSD_BLN_LA_pw_normal(output_buffer, d_MSD, nDMs, nTimesamples, PD_MAXTAPS, 0, sigma_constant);
timer.Stop();
partial_time = timer.Elapsed();
total_time += partial_time;
hipMemcpy(h_MSD, d_MSD, 3*sizeof(float), hipMemcpyDeviceToHost);
printf(" MSD BLN linear approximation: Mean: %f, Stddev: %f, modifier: %f\n", h_MSD[0], h_MSD[1], h_MSD[2]);
#ifdef GPU_ANALYSIS_DEBUG
printf(" BLN LA took:%f ms\n", partial_time);
#endif
//-------------- BLN_LA
*/
/*
//-------------- Base level noise grid
timer.Start();
MSD_BLN_grid(output_buffer, d_MSD, 32, 32, nDMs, nTimesamples, 0, sigma_constant);
timer.Stop();
partial_time = timer.Elapsed();
total_time += partial_time;
hipMemcpy(h_MSD, d_MSD, 3*sizeof(float), hipMemcpyDeviceToHost);
printf(" MSD BLN grid: Mean: %f, Stddev: %f, modifier: %f\n", h_MSD[0], h_MSD[1], h_MSD[2]);
#ifdef GPU_ANALYSIS_DEBUG
printf(" MSD BLN grid kernel took:%f ms\n", partial_time);
#endif
//-------------- Base level noise grid
*/
size_t free_mem,total_mem;
hipMemGetInfo(&free_mem,&total_mem);
printf(" Memory required by boxcar filters:%0.3f MB\n",(4.5*vals*sizeof(float) + 2*vals*sizeof(ushort))/(1024.0*1024) );
printf(" Memory available:%0.3f MB \n", ((float) free_mem)/(1024.0*1024.0) );
std::vector<int> DM_list;
unsigned long int max_timesamples=(free_mem*0.95)/(5.5*sizeof(float) + 2*sizeof(ushort));
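	// Per-timesample device footprint: 1 float for the resident dedispersed
	// input, 1 for d_boxcar_values, 0.5 for d_decimated, 2 for d_output_SNR and
	// 1 for d_peak_list (= 5.5 floats), plus 2 ushorts for d_output_taps;
	// 95% of free memory divided by this bounds the DM trials per batch.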
int DMs_per_cycle = max_timesamples/nTimesamples;
	int nRepeats, nRest, DM_shift, itemp, local_max_list_size;
itemp = (int) (DMs_per_cycle/THR_WARPS_PER_BLOCK);
DMs_per_cycle = itemp*THR_WARPS_PER_BLOCK;
nRepeats = nDMs/DMs_per_cycle;
nRest = nDMs - nRepeats*DMs_per_cycle;
local_max_list_size = (DMs_per_cycle*nTimesamples)/4;
for(int f=0; f<nRepeats; f++) DM_list.push_back(DMs_per_cycle);
if(nRest>0) DM_list.push_back(nRest);
printf(" SPS will run %d batches each containing %d DM trials. Remainder %d DM trials\n", (int) DM_list.size(), DMs_per_cycle, nRest);
max_iteration = Get_max_iteration(max_boxcar_width/inBin, &BC_widths, &max_width_performed);
printf(" Selected iteration:%d; maximum boxcar width requested:%d; maximum boxcar width performed:%d;\n", max_iteration, max_boxcar_width/inBin, max_width_performed);
Create_PD_plan(&PD_plan, &BC_widths, 1, nTimesamples);
if(DM_list.size()>0){
DMs_per_cycle = DM_list[0];
float *d_peak_list;
if ( hipSuccess != hipMalloc((void**) &d_peak_list, sizeof(float)*DMs_per_cycle*nTimesamples)) printf("Allocation error! peaks\n");
float *d_decimated;
		if ( hipSuccess != hipMalloc((void **) &d_decimated, sizeof(float)*(((DMs_per_cycle*nTimesamples)/2)+PD_MAXTAPS) )) printf("Allocation error! dedispersed\n");
float *d_boxcar_values;
if ( hipSuccess != hipMalloc((void **) &d_boxcar_values, sizeof(float)*DMs_per_cycle*nTimesamples)) printf("Allocation error! boxcars\n");
float *d_output_SNR;
if ( hipSuccess != hipMalloc((void **) &d_output_SNR, sizeof(float)*2*DMs_per_cycle*nTimesamples)) printf("Allocation error! SNR\n");
ushort *d_output_taps;
if ( hipSuccess != hipMalloc((void **) &d_output_taps, sizeof(ushort)*2*DMs_per_cycle*nTimesamples)) printf("Allocation error! taps\n");
int *gmem_peak_pos;
hipMalloc((void**) &gmem_peak_pos, 1*sizeof(int));
hipMemset((void*) gmem_peak_pos, 0, sizeof(int));
DM_shift = 0;
for(int f=0; f<DM_list.size(); f++) {
//-------------- SPS BLN
timer.Start();
//PD_SEARCH_LONG_BLN(&output_buffer[DM_shift*nTimesamples], d_boxcar_values, d_decimated, d_output_SNR, d_output_taps, d_MSD, &PD_plan, max_iteration, DM_list[f], nTimesamples);
//PD_SEARCH_LONG_BLN_EACH(&output_buffer[DM_shift*nTimesamples], d_boxcar_values, d_decimated, d_output_SNR, d_output_taps, &PD_plan, max_iteration, DM_list[f], nTimesamples, sigma_constant);
//PD_SEARCH_LONG_LINAPPROX(&output_buffer[DM_shift*nTimesamples], d_boxcar_values, d_decimated, d_output_SNR, d_output_taps, d_MSD, &PD_plan, max_iteration, DM_list[f], nTimesamples);
if(enable_sps_baselinenoise){
PD_SEARCH_LONG_BLN_LINAPPROX_EACH(&output_buffer[DM_shift*nTimesamples], d_boxcar_values, d_decimated, d_output_SNR, d_output_taps, &PD_plan, max_iteration, DM_list[f], nTimesamples, sigma_constant);
}
else {
PD_SEARCH_LONG_LINAPPROX_EACH(&output_buffer[DM_shift*nTimesamples], d_boxcar_values, d_decimated, d_output_SNR, d_output_taps, &PD_plan, max_iteration, DM_list[f], nTimesamples);
}
//
timer.Stop();
partial_time = timer.Elapsed();
total_time += partial_time;
#ifdef GPU_ANALYSIS_DEBUG
printf("PD_SEARCH took:%f ms\n", partial_time);
#endif
//-------------- SPS BLN
checkCudaErrors(hipGetLastError());
#ifdef GPU_ANALYSIS_DEBUG
printf("BC_shift:%d; DMs_per_cycle:%d; f*DMs_per_cycle:%d; max_iteration:%d;\n", DM_shift*nTimesamples, DM_list[f], DM_shift, max_iteration);
#endif
if(candidate_algorithm==1){
//-------------- Thresholding
timer.Start();
THRESHOLD(d_output_SNR, d_output_taps, d_peak_list, gmem_peak_pos, cutoff, DM_list[f], nTimesamples, DM_shift, &PD_plan, max_iteration, local_max_list_size);
timer.Stop();
partial_time = timer.Elapsed();
total_time += partial_time;
#ifdef GPU_ANALYSIS_DEBUG
printf("THR_WARP took:%f ms\n", partial_time);
#endif
//-------------- Thresholding
}
else {
//-------------- Peak finding
timer.Start();
PEAK_FIND(d_output_SNR, d_output_taps, d_peak_list, DM_list[f], nTimesamples, cutoff, local_max_list_size, gmem_peak_pos, DM_shift, &PD_plan, max_iteration);
timer.Stop();
partial_time = timer.Elapsed();
total_time += partial_time;
#ifdef GPU_ANALYSIS_DEBUG
printf("PEAK_FIND took:%f ms\n", partial_time);
#endif
//-------------- Peak finding
}
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipMemcpy(&temp_peak_pos, gmem_peak_pos, sizeof(int), hipMemcpyDeviceToHost));
#ifdef GPU_ANALYSIS_DEBUG
printf("temp_peak_pos:%d; host_pos:%zu; max:%zu; local_max:%d;\n", temp_peak_pos, (*peak_pos), max_peak_size, local_max_list_size);
#endif
if( temp_peak_pos>=local_max_list_size ) {
printf(" Maximum list size reached! Increase list size or increase sigma cutoff.\n");
temp_peak_pos=local_max_list_size;
}
if( ((*peak_pos) + temp_peak_pos)<max_peak_size){
checkCudaErrors(hipMemcpy(&h_peak_list[(*peak_pos)*4], d_peak_list, temp_peak_pos*4*sizeof(float), hipMemcpyDeviceToHost));
*peak_pos = (*peak_pos) + temp_peak_pos;
}
else printf("Error peak list is too small!\n");
//---------> Old thresholding code.
//#ifdef OLD_THRESHOLD
//#endif
//---------> Old thresholding code.
DM_shift = DM_shift + DM_list[f];
hipMemset((void*) gmem_peak_pos, 0, sizeof(int));
}
//------------------------> Output
#pragma omp parallel for
for (int count = 0; count < (*peak_pos); count++){
h_peak_list[4*count] = h_peak_list[4*count]*dm_step[i] + dm_low[i];
h_peak_list[4*count + 1] = h_peak_list[4*count + 1]*tsamp + tstart;
}
FILE *fp_out;
char filename[200];
if(candidate_algorithm==1){
if((*peak_pos)>0){
sprintf(filename, "analysed-t_%.2f-dm_%.2f-%.2f.dat", tstart, dm_low[i], dm_high[i]);
if (( fp_out = fopen(filename, "wb") ) == NULL) {
fprintf(stderr, "Error opening output file!\n");
exit(0);
}
fwrite(h_peak_list, (*peak_pos)*sizeof(float), 4, fp_out);
fclose(fp_out);
}
}
else {
if((*peak_pos)>0){
sprintf(filename, "peak_analysed-t_%.2f-dm_%.2f-%.2f.dat", tstart, dm_low[i], dm_high[i]);
if (( fp_out = fopen(filename, "wb") ) == NULL) {
fprintf(stderr, "Error opening output file!\n");
exit(0);
}
fwrite(h_peak_list, (*peak_pos)*sizeof(float), 4, fp_out);
fclose(fp_out);
}
}
//------------------------> Output
hipFree(d_peak_list);
hipFree(d_boxcar_values);
hipFree(d_decimated);
hipFree(d_output_SNR);
hipFree(d_output_taps);
hipFree(gmem_peak_pos);
}
else printf("Error not enough memory to search for pulses\n");
printf("\n TOTAL TIME OF SPS:%f ms\n", total_time);
printf("----------<\n\n");
hipFree(d_MSD);
//----------> GPU part
//---------------------------------------------------------------------------
}
|
6fe44b0bee3b60c42ea7a03620c11e045e674351.cu
|
//#define GPU_ANALYSIS_DEBUG
#include <vector>
#include <stdio.h>
#include <stdlib.h>
#include "headers/params.h"
#include "headers/device_BC_plan.h"
#include "headers/device_peak_find.h"
#include "headers/device_MSD_BLN_grid.h"
#include "headers/device_MSD_BLN_pw.h"
//#include "headers/device_MSD_BLN_pw_dp.h"
#include "headers/device_MSD_limited.h"
#include "headers/device_SPS_long.h"
#include "headers/device_threshold.h"
#include "headers/device_single_FIR.h"
#include "timer.h"
//---------------------------------------------------------------------------------
//-------> Kahan MSD
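// Compensated (Kahan) summation: sum_error captures the low-order bits lost
// when b = sum + a rounds, via sum_error = (b - sum) - a, and is subtracted
// from the next input so precision is preserved over nDMs*nTimesamples terms.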
void d_kahan_summation(float *signal, int nDMs, int nTimesamples, int offset, float *result, float *error){
double sum;
double sum_error;
double a,b;
sum=0;
sum_error=0;
for(int d=0;d<nDMs; d++){
for(int s=0; s<(nTimesamples-offset); s++){
a=signal[(size_t) (d*nTimesamples + s)]-sum_error;
b=sum+a;
sum_error=(b-sum);
sum_error=sum_error-a;
sum=b;
}
}
*result=sum;
*error=sum_error;
}
void d_kahan_sd(float *signal, int nDMs, int nTimesamples, int offset, double mean, float *result, float *error){
double sum;
double sum_error;
double a,b,dtemp;
sum=0;
sum_error=0;
for(int d=0;d<nDMs; d++){
for(int s=0; s<(nTimesamples-offset); s++){
dtemp=(signal[(size_t) (d*nTimesamples + s)]-sum_error - mean);
a=dtemp*dtemp;
b=sum+a;
sum_error=(b-sum);
sum_error=sum_error-a;
sum=b;
}
}
*result=sum;
*error=sum_error;
}
void MSD_Kahan(float *h_input, int nDMs, int nTimesamples, int offset, double *mean, double *sd){
float error, signal_mean, signal_sd;
int nElements=nDMs*(nTimesamples-offset);
d_kahan_summation(h_input, nDMs, nTimesamples, offset, &signal_mean, &error);
signal_mean=signal_mean/nElements;
d_kahan_sd(h_input, nDMs, nTimesamples, offset, signal_mean, &signal_sd, &error);
signal_sd=sqrt(signal_sd/nElements);
*mean=signal_mean;
*sd=signal_sd;
}
void MSD_on_GPU(float *h_input, float *d_input, float *d_MSD, float *signal_mean, float *signal_sd, float *signal_mean_bln, float *signal_sd_bln, float *signal_mean_bl_bln, float *signal_sd_bl_bln, int nDMs, int nTimesamples, int offset, float sigma_constant, float *MSD_limited_time, float *MSD_BLN_pw_time, float *MSD_BLN_grid_time){
GpuTimer timer;
float h_MSD[3];
cudaMemcpy( d_input, h_input, ((size_t) nDMs*nTimesamples)*sizeof(float), cudaMemcpyHostToDevice);
timer.Start();
MSD_limited(d_input, d_MSD, nDMs, nTimesamples, offset);
timer.Stop();
(*MSD_limited_time) = timer.Elapsed();
cudaMemcpy(h_MSD, d_MSD, 3*sizeof(float), cudaMemcpyDeviceToHost);
(*signal_mean) = h_MSD[0];
(*signal_sd) = h_MSD[1];
timer.Start();
MSD_BLN_pw(d_input, d_MSD, nDMs, nTimesamples, offset, sigma_constant);
timer.Stop();
(*MSD_BLN_pw_time) = timer.Elapsed();
cudaMemcpy(h_MSD, d_MSD, 3*sizeof(float), cudaMemcpyDeviceToHost);
(*signal_mean_bln) = h_MSD[0];
(*signal_sd_bln) = h_MSD[1];
timer.Start();
MSD_BLN_grid(d_input, d_MSD, 32, 32, nDMs, nTimesamples, offset, sigma_constant);
timer.Stop();
(*MSD_BLN_grid_time) = timer.Elapsed();
cudaMemcpy(h_MSD, d_MSD, 3*sizeof(float), cudaMemcpyDeviceToHost);
(*signal_mean_bl_bln) = h_MSD[0];
(*signal_sd_bl_bln) = h_MSD[1];
}
void MSD_on_GPU_LA(float *h_input, float *d_input, float *d_MSD, float *h_MSD_LA, float *h_MSD_BLN_LA, int nDMs, int nTimesamples, int offset, float sigma_constant){
cudaMemcpy( d_input, h_input, ((size_t) nDMs*nTimesamples)*sizeof(float), cudaMemcpyHostToDevice);
MSD_linear_approximation(d_input, d_MSD, 32, nDMs, nTimesamples, offset);
cudaMemcpy(h_MSD_LA, d_MSD, 3*sizeof(float), cudaMemcpyDeviceToHost);
MSD_BLN_LA_pw_normal(d_input, d_MSD, 32, nDMs, nTimesamples, offset, sigma_constant);
cudaMemcpy(h_MSD_BLN_LA, d_MSD, 3*sizeof(float), cudaMemcpyDeviceToHost);
}
void MSD_on_GPU_halfed(float *h_input, float *d_input, float *d_MSD, float *signal_mean, float *signal_sd, float *signal_mean_bln, float *signal_sd_bln, int nDMs, int nTimesamples, int offset, float sigma_constant){
float h_MSD[3];
float *h_temp;
int dt=nTimesamples/2;
h_temp = new float[nDMs*dt];
for(int d=0; d<nDMs; d++){
for(int s=0; s<dt; s++){
h_temp[d*dt + s] = h_input[d*nTimesamples + 2*s];
}
}
cudaMemcpy( d_input, h_temp, ((size_t) nDMs*dt)*sizeof(float), cudaMemcpyHostToDevice);
MSD_limited(d_input, d_MSD, nDMs, dt, offset/2);
cudaMemcpy(h_MSD, d_MSD, 3*sizeof(float), cudaMemcpyDeviceToHost);
(*signal_mean) = h_MSD[0];
(*signal_sd) = h_MSD[1];
MSD_BLN_pw(d_input, d_MSD, nDMs, dt, offset/2, sigma_constant);
cudaMemcpy(h_MSD, d_MSD, 3*sizeof(float), cudaMemcpyDeviceToHost);
(*signal_mean_bln) = h_MSD[0];
(*signal_sd_bln) = h_MSD[1];
delete [] h_temp;
}
//-------> Kahan MSD
//---------------------------------------------------------------------------------
void Calculate_FIR(float *input, float *output, int nTaps, int nDMs, int nTimesamples, int ut) {
int d,s,t;
float ftemp;
for(d=0; d<nDMs; d++){
for(s=0; s<nTimesamples-ut; s++){
ftemp=0;
for(t=0; t<nTaps; t++){
ftemp+=input[d*nTimesamples + s + t];
}
output[d*nTimesamples + s]=ftemp;
}
}
}
void Decimate_in_time(float *h_input, float *h_CPU_decimate, int DIT_value, int DIT_factor, int nDMs, int nTimesamples, int offset){
float ftemp;
int decimated_timesamples;
decimated_timesamples=nTimesamples/(DIT_value*DIT_factor);
for(int d=0; d<nDMs; d++){
for(int s=0; s<decimated_timesamples; s++){
ftemp=0;
for(int t=0; t<DIT_factor; t++){
ftemp = ftemp + h_input[d*decimated_timesamples*DIT_factor + s*DIT_factor + t];
}
h_CPU_decimate[d*decimated_timesamples + s]=ftemp;
}
}
}
void Export_data(float *input, size_t nDMs, size_t nTimesamples, char *filename){
FILE *fp_out;
char mod_filename[200];
sprintf(mod_filename,"%s.dat",filename);
	if (( fp_out = fopen(mod_filename, "wb") ) == NULL) {
fprintf(stderr, "Error opening output file!\n");
exit(0);
}
fwrite(input, (nDMs*nTimesamples)*sizeof(float), 4, fp_out);
fclose(fp_out);
for(int d=0; d<nDMs; d++){
sprintf(mod_filename,"%s_dm%d.dat",filename,d);
		if (( fp_out = fopen(mod_filename, "wb") ) == NULL) {
fprintf(stderr, "Error opening output file!\n");
exit(0);
}
fwrite(&input[d*nTimesamples], nTimesamples*sizeof(float), 4, fp_out);
fclose(fp_out);
}
}
void export_file_nDM_nTimesamples(float *data, int nDMs, int nTimesamples, char *filename) {
FILE *file_out;
char str[200];
sprintf(str, "%s_DM.dat", filename);
if (( file_out = fopen(str, "w") ) == NULL) {
fprintf(stderr, "Error opening output file!\n");
exit(0);
}
printf("export nDMs\n");
for (int s = 0; s < nTimesamples; s++) {
for (int d = 0; d < nDMs; d++) {
fprintf(file_out, "%f ", data[d*nTimesamples + s]);
}
fprintf(file_out, "\n");
}
fclose(file_out);
sprintf(str, "%s_Time.dat", filename);
if (( file_out = fopen(str, "w") ) == NULL) {
fprintf(stderr, "Error opening output file!\n");
exit(0);
}
printf("export nTimesamples\n");
for (int d = 0; d < nDMs; d++) {
for (int s = 0; s < nTimesamples; s++) {
fprintf(file_out, "%f ", data[d*nTimesamples + s]);
}
fprintf(file_out, "\n");
}
fclose(file_out);
}
void Create_PD_plan(std::vector<PulseDetection_plan> *PD_plan, std::vector<int> *BC_widths, int nDMs, int nTimesamples){
int Elements_per_block, itemp, nRest;
PulseDetection_plan PDmp;
if(BC_widths->size()>0){
PDmp.shift = 0;
PDmp.output_shift = 0;
PDmp.startTaps = 0;
PDmp.iteration = 0;
PDmp.decimated_timesamples = nTimesamples;
PDmp.dtm = (nTimesamples>>(PDmp.iteration+1));
PDmp.dtm = PDmp.dtm - (PDmp.dtm&1);
PDmp.nBoxcars = BC_widths->operator[](0);
Elements_per_block = PD_NTHREADS*2 - PDmp.nBoxcars;
itemp = PDmp.decimated_timesamples;
PDmp.nBlocks = itemp/Elements_per_block;
nRest = itemp - PDmp.nBlocks*Elements_per_block;
if(nRest>0) PDmp.nBlocks++;
PDmp.unprocessed_samples = PDmp.nBoxcars + 6;
if(PDmp.decimated_timesamples<PDmp.unprocessed_samples) PDmp.nBlocks=0;
PDmp.total_ut = PDmp.unprocessed_samples;
PD_plan->push_back(PDmp);
for(int f=1; f< (int) BC_widths->size(); f++){
// These are based on previous values of PDmp
PDmp.shift = PDmp.nBoxcars/2;
PDmp.output_shift = PDmp.output_shift + PDmp.decimated_timesamples;
PDmp.startTaps = PDmp.startTaps + PDmp.nBoxcars*(1<<PDmp.iteration);
PDmp.iteration = PDmp.iteration + 1;
// Definition of new PDmp values
PDmp.decimated_timesamples = PDmp.dtm;
PDmp.dtm = (nTimesamples>>(PDmp.iteration+1));
PDmp.dtm = PDmp.dtm - (PDmp.dtm&1);
PDmp.nBoxcars = BC_widths->operator[](f);
Elements_per_block=PD_NTHREADS*2 - PDmp.nBoxcars;
itemp = PDmp.decimated_timesamples;
PDmp.nBlocks = itemp/Elements_per_block;
nRest = itemp - PDmp.nBlocks*Elements_per_block;
if(nRest>0) PDmp.nBlocks++;
			PDmp.unprocessed_samples = PDmp.unprocessed_samples/2 + PDmp.nBoxcars + 6;
if(PDmp.decimated_timesamples<PDmp.unprocessed_samples) PDmp.nBlocks=0;
PDmp.total_ut = PDmp.unprocessed_samples*(1<<PDmp.iteration);
PD_plan->push_back(PDmp);
}
}
}
int Get_max_iteration(int max_boxcar_width, std::vector<int> *BC_widths, int *max_width_performed){
int startTaps, iteration;
startTaps = 0;
iteration = 0;
for(int f=0; f<(int) BC_widths->size(); f++){
startTaps = startTaps + BC_widths->operator[](f)*(1<<f);
if(startTaps>=max_boxcar_width) {
iteration = f+1;
break;
}
}
if(max_boxcar_width>startTaps) {
iteration=(int) BC_widths->size();
}
*max_width_performed=startTaps;
return(iteration);
}
void analysis_GPU(float *h_peak_list, size_t *peak_pos, size_t max_peak_size, int i, float tstart, int t_processed, int inBin, int outBin, int *maxshift, int max_ndms, int *ndms, float cutoff, float sigma_constant, float max_boxcar_width_in_sec, float *output_buffer, float *dm_low, float *dm_high, float *dm_step, float tsamp, int candidate_algorithm, int enable_sps_baselinenoise){
int max_boxcar_width = (int) (max_boxcar_width_in_sec/tsamp);
int max_width_performed=0;
//unsigned long int j;
unsigned long int vals;
int nTimesamples = t_processed;
int nDMs = ndms[i];
int temp_peak_pos;
//double total;
// Calculate the total number of values
vals = (unsigned long int) ( nDMs*nTimesamples );
double total_time, partial_time;
//float max, min, threshold;
int max_iteration;
int t_BC_widths[10]={PD_MAXTAPS,16,16,16,8,8,8,8,8,8};
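	// Boxcar counts per decimation iteration: PD_MAXTAPS boxcars at full time
	// resolution, then 16 and 8 at successively halved resolutions; the effective
	// widths accumulate as startTaps += nBoxcars*2^iteration (see Get_max_iteration).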
std::vector<int> BC_widths(t_BC_widths,t_BC_widths+sizeof(t_BC_widths)/sizeof(int));
std::vector<PulseDetection_plan> PD_plan;
//---------------------------------------------------------------------------
//----------> GPU part
printf("\n----------> GPU analysis part\n");
printf(" Dimensions nDMs:%d; nTimesamples:%d; inBin:%d; outBin:%d; maxshift:%d; \n", ndms[i], t_processed, inBin, outBin, *maxshift);
GpuTimer timer;
//float h_MSD[3];
float *d_MSD;
checkCudaErrors(cudaGetLastError());
if ( cudaSuccess != cudaMalloc((void**) &d_MSD, sizeof(float)*3)) {printf("Allocation error!\n"); exit(201);}
total_time = 0;
/*
//-------------- CPU check
float *h_temp;
double signal_mean, signal_sd;
h_temp = (float *)malloc( ((size_t) nDMs*nTimesamples)*sizeof(float));
memset(h_temp, 0.0, ((size_t) nDMs*nTimesamples)*sizeof(float));
cudaMemcpy( h_temp, output_buffer, ((size_t) nDMs*nTimesamples)*sizeof(float), cudaMemcpyDeviceToHost);
MSD_Kahan(h_temp, nDMs, nTimesamples, 0, &signal_mean, &signal_sd);
printf("MSD_kahan: after 1 tap Mean: %e, Standard deviation: %e;\n",signal_mean, signal_sd);
//-------------- CPU check
*/
/*
//-------------- One Call linear approximation
timer.Start();
MSD_linear_approximation(output_buffer, d_MSD, PD_MAXTAPS, nDMs, nTimesamples, 0);
timer.Stop();
partial_time = timer.Elapsed();
total_time += partial_time;
cudaMemcpy(h_MSD, d_MSD, 3*sizeof(float), cudaMemcpyDeviceToHost);
printf(" MSD linear approximation: Mean: %f, Stddev: %f, modifier: %f\n", h_MSD[0], h_MSD[1], h_MSD[2]);
#ifdef GPU_ANALYSIS_DEBUG
printf(" One kernel took:%f ms\n", partial_time);
#endif
//-------------- One Call linear approximation
*/
/*
//-------------- Base level noise point-wise
timer.Start();
MSD_BLN_pw(output_buffer, d_MSD, nDMs, nTimesamples, 0, sigma_constant);
timer.Stop();
partial_time = timer.Elapsed();
total_time += partial_time;
cudaMemcpy(h_MSD, d_MSD, 3*sizeof(float), cudaMemcpyDeviceToHost);
printf(" MSD BLN point-wise: Mean: %f, Stddev: %f, modifier: %f\n", h_MSD[0], h_MSD[1], h_MSD[2]);
#ifdef GPU_ANALYSIS_DEBUG
printf(" MSD BLN point-wise kernel took:%f ms\n", partial_time);
#endif
//-------------- Base level noise point-wise
*/
/*
//-------------- BLN_LA
timer.Start();
MSD_BLN_LA_pw_normal(output_buffer, d_MSD, nDMs, nTimesamples, PD_MAXTAPS, 0, sigma_constant);
timer.Stop();
partial_time = timer.Elapsed();
total_time += partial_time;
cudaMemcpy(h_MSD, d_MSD, 3*sizeof(float), cudaMemcpyDeviceToHost);
printf(" MSD BLN linear approximation: Mean: %f, Stddev: %f, modifier: %f\n", h_MSD[0], h_MSD[1], h_MSD[2]);
#ifdef GPU_ANALYSIS_DEBUG
printf(" BLN LA took:%f ms\n", partial_time);
#endif
//-------------- BLN_LA
*/
/*
//-------------- Base level noise grid
timer.Start();
MSD_BLN_grid(output_buffer, d_MSD, 32, 32, nDMs, nTimesamples, 0, sigma_constant);
timer.Stop();
partial_time = timer.Elapsed();
total_time += partial_time;
cudaMemcpy(h_MSD, d_MSD, 3*sizeof(float), cudaMemcpyDeviceToHost);
printf(" MSD BLN grid: Mean: %f, Stddev: %f, modifier: %f\n", h_MSD[0], h_MSD[1], h_MSD[2]);
#ifdef GPU_ANALYSIS_DEBUG
printf(" MSD BLN grid kernel took:%f ms\n", partial_time);
#endif
//-------------- Base level noise grid
*/
size_t free_mem,total_mem;
cudaMemGetInfo(&free_mem,&total_mem);
printf(" Memory required by boxcar filters:%0.3f MB\n",(4.5*vals*sizeof(float) + 2*vals*sizeof(ushort))/(1024.0*1024) );
printf(" Memory available:%0.3f MB \n", ((float) free_mem)/(1024.0*1024.0) );
std::vector<int> DM_list;
unsigned long int max_timesamples=(free_mem*0.95)/(5.5*sizeof(float) + 2*sizeof(ushort));
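	// Per-timesample device footprint: 1 float for the resident dedispersed
	// input, 1 for d_boxcar_values, 0.5 for d_decimated, 2 for d_output_SNR and
	// 1 for d_peak_list (= 5.5 floats), plus 2 ushorts for d_output_taps;
	// 95% of free memory divided by this bounds the DM trials per batch.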
int DMs_per_cycle = max_timesamples/nTimesamples;
	int nRepeats, nRest, DM_shift, itemp, local_max_list_size;
itemp = (int) (DMs_per_cycle/THR_WARPS_PER_BLOCK);
DMs_per_cycle = itemp*THR_WARPS_PER_BLOCK;
nRepeats = nDMs/DMs_per_cycle;
nRest = nDMs - nRepeats*DMs_per_cycle;
local_max_list_size = (DMs_per_cycle*nTimesamples)/4;
for(int f=0; f<nRepeats; f++) DM_list.push_back(DMs_per_cycle);
if(nRest>0) DM_list.push_back(nRest);
printf(" SPS will run %d batches each containing %d DM trials. Remainder %d DM trials\n", (int) DM_list.size(), DMs_per_cycle, nRest);
max_iteration = Get_max_iteration(max_boxcar_width/inBin, &BC_widths, &max_width_performed);
printf(" Selected iteration:%d; maximum boxcar width requested:%d; maximum boxcar width performed:%d;\n", max_iteration, max_boxcar_width/inBin, max_width_performed);
Create_PD_plan(&PD_plan, &BC_widths, 1, nTimesamples);
if(DM_list.size()>0){
DMs_per_cycle = DM_list[0];
float *d_peak_list;
if ( cudaSuccess != cudaMalloc((void**) &d_peak_list, sizeof(float)*DMs_per_cycle*nTimesamples)) printf("Allocation error! peaks\n");
float *d_decimated;
		if ( cudaSuccess != cudaMalloc((void **) &d_decimated, sizeof(float)*(((DMs_per_cycle*nTimesamples)/2)+PD_MAXTAPS) )) printf("Allocation error! dedispersed\n");
float *d_boxcar_values;
if ( cudaSuccess != cudaMalloc((void **) &d_boxcar_values, sizeof(float)*DMs_per_cycle*nTimesamples)) printf("Allocation error! boxcars\n");
float *d_output_SNR;
if ( cudaSuccess != cudaMalloc((void **) &d_output_SNR, sizeof(float)*2*DMs_per_cycle*nTimesamples)) printf("Allocation error! SNR\n");
ushort *d_output_taps;
if ( cudaSuccess != cudaMalloc((void **) &d_output_taps, sizeof(ushort)*2*DMs_per_cycle*nTimesamples)) printf("Allocation error! taps\n");
int *gmem_peak_pos;
cudaMalloc((void**) &gmem_peak_pos, 1*sizeof(int));
cudaMemset((void*) gmem_peak_pos, 0, sizeof(int));
DM_shift = 0;
for(int f=0; f<DM_list.size(); f++) {
//-------------- SPS BLN
timer.Start();
//PD_SEARCH_LONG_BLN(&output_buffer[DM_shift*nTimesamples], d_boxcar_values, d_decimated, d_output_SNR, d_output_taps, d_MSD, &PD_plan, max_iteration, DM_list[f], nTimesamples);
//PD_SEARCH_LONG_BLN_EACH(&output_buffer[DM_shift*nTimesamples], d_boxcar_values, d_decimated, d_output_SNR, d_output_taps, &PD_plan, max_iteration, DM_list[f], nTimesamples, sigma_constant);
//PD_SEARCH_LONG_LINAPPROX(&output_buffer[DM_shift*nTimesamples], d_boxcar_values, d_decimated, d_output_SNR, d_output_taps, d_MSD, &PD_plan, max_iteration, DM_list[f], nTimesamples);
if(enable_sps_baselinenoise){
PD_SEARCH_LONG_BLN_LINAPPROX_EACH(&output_buffer[DM_shift*nTimesamples], d_boxcar_values, d_decimated, d_output_SNR, d_output_taps, &PD_plan, max_iteration, DM_list[f], nTimesamples, sigma_constant);
}
else {
PD_SEARCH_LONG_LINAPPROX_EACH(&output_buffer[DM_shift*nTimesamples], d_boxcar_values, d_decimated, d_output_SNR, d_output_taps, &PD_plan, max_iteration, DM_list[f], nTimesamples);
}
//
timer.Stop();
partial_time = timer.Elapsed();
total_time += partial_time;
#ifdef GPU_ANALYSIS_DEBUG
printf("PD_SEARCH took:%f ms\n", partial_time);
#endif
//-------------- SPS BLN
checkCudaErrors(cudaGetLastError());
#ifdef GPU_ANALYSIS_DEBUG
printf("BC_shift:%d; DMs_per_cycle:%d; f*DMs_per_cycle:%d; max_iteration:%d;\n", DM_shift*nTimesamples, DM_list[f], DM_shift, max_iteration);
#endif
if(candidate_algorithm==1){
//-------------- Thresholding
timer.Start();
THRESHOLD(d_output_SNR, d_output_taps, d_peak_list, gmem_peak_pos, cutoff, DM_list[f], nTimesamples, DM_shift, &PD_plan, max_iteration, local_max_list_size);
timer.Stop();
partial_time = timer.Elapsed();
total_time += partial_time;
#ifdef GPU_ANALYSIS_DEBUG
printf("THR_WARP took:%f ms\n", partial_time);
#endif
//-------------- Thresholding
}
else {
//-------------- Peak finding
timer.Start();
PEAK_FIND(d_output_SNR, d_output_taps, d_peak_list, DM_list[f], nTimesamples, cutoff, local_max_list_size, gmem_peak_pos, DM_shift, &PD_plan, max_iteration);
timer.Stop();
partial_time = timer.Elapsed();
total_time += partial_time;
#ifdef GPU_ANALYSIS_DEBUG
printf("PEAK_FIND took:%f ms\n", partial_time);
#endif
//-------------- Peak finding
}
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaMemcpy(&temp_peak_pos, gmem_peak_pos, sizeof(int), cudaMemcpyDeviceToHost));
#ifdef GPU_ANALYSIS_DEBUG
printf("temp_peak_pos:%d; host_pos:%zu; max:%zu; local_max:%d;\n", temp_peak_pos, (*peak_pos), max_peak_size, local_max_list_size);
#endif
if( temp_peak_pos>=local_max_list_size ) {
printf(" Maximum list size reached! Increase list size or increase sigma cutoff.\n");
temp_peak_pos=local_max_list_size;
}
if( ((*peak_pos) + temp_peak_pos)<max_peak_size){
checkCudaErrors(cudaMemcpy(&h_peak_list[(*peak_pos)*4], d_peak_list, temp_peak_pos*4*sizeof(float), cudaMemcpyDeviceToHost));
*peak_pos = (*peak_pos) + temp_peak_pos;
}
else printf("Error peak list is too small!\n");
//---------> Old thresholding code.
//#ifdef OLD_THRESHOLD
//#endif
//---------> Old thresholding code.
DM_shift = DM_shift + DM_list[f];
cudaMemset((void*) gmem_peak_pos, 0, sizeof(int));
}
//------------------------> Output
#pragma omp parallel for
for (int count = 0; count < (*peak_pos); count++){
h_peak_list[4*count] = h_peak_list[4*count]*dm_step[i] + dm_low[i];
h_peak_list[4*count + 1] = h_peak_list[4*count + 1]*tsamp + tstart;
}
FILE *fp_out;
char filename[200];
if(candidate_algorithm==1){
if((*peak_pos)>0){
sprintf(filename, "analysed-t_%.2f-dm_%.2f-%.2f.dat", tstart, dm_low[i], dm_high[i]);
if (( fp_out = fopen(filename, "wb") ) == NULL) {
fprintf(stderr, "Error opening output file!\n");
exit(0);
}
fwrite(h_peak_list, (*peak_pos)*sizeof(float), 4, fp_out);
fclose(fp_out);
}
}
else {
if((*peak_pos)>0){
sprintf(filename, "peak_analysed-t_%.2f-dm_%.2f-%.2f.dat", tstart, dm_low[i], dm_high[i]);
if (( fp_out = fopen(filename, "wb") ) == NULL) {
fprintf(stderr, "Error opening output file!\n");
exit(0);
}
fwrite(h_peak_list, (*peak_pos)*sizeof(float), 4, fp_out);
fclose(fp_out);
}
}
//------------------------> Output
cudaFree(d_peak_list);
cudaFree(d_boxcar_values);
cudaFree(d_decimated);
cudaFree(d_output_SNR);
cudaFree(d_output_taps);
cudaFree(gmem_peak_pos);
}
else printf("Error not enough memory to search for pulses\n");
printf("\n TOTAL TIME OF SPS:%f ms\n", total_time);
printf("----------<\n\n");
cudaFree(d_MSD);
//----------> GPU part
//---------------------------------------------------------------------------
}
|
2e7dfaf4a2fa98f1149f8925a141ed303ebda573.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel5_minus_2_front;
int xdim0_update_halo_kernel5_minus_2_front_h = -1;
__constant__ int ydim0_update_halo_kernel5_minus_2_front;
int ydim0_update_halo_kernel5_minus_2_front_h = -1;
__constant__ int xdim1_update_halo_kernel5_minus_2_front;
int xdim1_update_halo_kernel5_minus_2_front_h = -1;
__constant__ int ydim1_update_halo_kernel5_minus_2_front;
int ydim1_update_halo_kernel5_minus_2_front_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel5_minus_2_front * (y) + \
xdim0_update_halo_kernel5_minus_2_front * \
ydim0_update_halo_kernel5_minus_2_front * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel5_minus_2_front * (y) + \
xdim1_update_halo_kernel5_minus_2_front * \
ydim1_update_halo_kernel5_minus_2_front * (z))
// user function
__device__
inline void
update_halo_kernel5_minus_2_front_gpu(double *vol_flux_z,
double *mass_flux_z,
const int *fields) {
if (fields[FIELD_VOL_FLUX_Z] == 1)
vol_flux_z[OPS_ACC0(0, 0, 0)] = -vol_flux_z[OPS_ACC0(0, 0, -2)];
if (fields[FIELD_MASS_FLUX_Z] == 1)
mass_flux_z[OPS_ACC1(0, 0, 0)] = -mass_flux_z[OPS_ACC1(0, 0, -2)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel5_minus_2_front(
double *__restrict arg0, double *__restrict arg1,
const int *__restrict arg2, int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel5_minus_2_front +
idx_z * 1 * 1 * xdim0_update_halo_kernel5_minus_2_front *
ydim0_update_halo_kernel5_minus_2_front;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel5_minus_2_front +
idx_z * 1 * 1 * xdim1_update_halo_kernel5_minus_2_front *
ydim1_update_halo_kernel5_minus_2_front;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel5_minus_2_front_gpu(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel5_minus_2_front(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1,
ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 139))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(139, "update_halo_kernel5_minus_2_front");
OPS_kernels[139].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel5_minus_2_front_h ||
ydim0 != ydim0_update_halo_kernel5_minus_2_front_h ||
xdim1 != xdim1_update_halo_kernel5_minus_2_front_h ||
ydim1 != ydim1_update_halo_kernel5_minus_2_front_h) {
hipMemcpyToSymbol(xdim0_update_halo_kernel5_minus_2_front, &xdim0,
sizeof(int));
xdim0_update_halo_kernel5_minus_2_front_h = xdim0;
hipMemcpyToSymbol(ydim0_update_halo_kernel5_minus_2_front, &ydim0,
sizeof(int));
ydim0_update_halo_kernel5_minus_2_front_h = ydim0;
hipMemcpyToSymbol(xdim1_update_halo_kernel5_minus_2_front, &xdim1,
sizeof(int));
xdim1_update_halo_kernel5_minus_2_front_h = xdim1;
hipMemcpyToSymbol(ydim1_update_halo_kernel5_minus_2_front, &ydim1,
sizeof(int));
ydim1_update_halo_kernel5_minus_2_front_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
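  // Fold the 3-D start index into a flat byte offset: in each dimension,
  // subtract the dat's base origin and halo displacement (d_m) from the start
  // coordinate, then scale by the accumulated x/y strides and the element size.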
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[139].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_halo_kernel5_minus_2_front), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[139].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[139].mpi_time += t2 - t1;
OPS_kernels[139].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[139].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
|
2e7dfaf4a2fa98f1149f8925a141ed303ebda573.cu
|
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel5_minus_2_front;
int xdim0_update_halo_kernel5_minus_2_front_h = -1;
__constant__ int ydim0_update_halo_kernel5_minus_2_front;
int ydim0_update_halo_kernel5_minus_2_front_h = -1;
__constant__ int xdim1_update_halo_kernel5_minus_2_front;
int xdim1_update_halo_kernel5_minus_2_front_h = -1;
__constant__ int ydim1_update_halo_kernel5_minus_2_front;
int ydim1_update_halo_kernel5_minus_2_front_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel5_minus_2_front * (y) + \
xdim0_update_halo_kernel5_minus_2_front * \
ydim0_update_halo_kernel5_minus_2_front * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel5_minus_2_front * (y) + \
xdim1_update_halo_kernel5_minus_2_front * \
ydim1_update_halo_kernel5_minus_2_front * (z))
// user function
__device__
inline void
update_halo_kernel5_minus_2_front_gpu(double *vol_flux_z,
double *mass_flux_z,
const int *fields) {
if (fields[FIELD_VOL_FLUX_Z] == 1)
vol_flux_z[OPS_ACC0(0, 0, 0)] = -vol_flux_z[OPS_ACC0(0, 0, -2)];
if (fields[FIELD_MASS_FLUX_Z] == 1)
mass_flux_z[OPS_ACC1(0, 0, 0)] = -mass_flux_z[OPS_ACC1(0, 0, -2)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel5_minus_2_front(
double *__restrict arg0, double *__restrict arg1,
const int *__restrict arg2, int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel5_minus_2_front +
idx_z * 1 * 1 * xdim0_update_halo_kernel5_minus_2_front *
ydim0_update_halo_kernel5_minus_2_front;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel5_minus_2_front +
idx_z * 1 * 1 * xdim1_update_halo_kernel5_minus_2_front *
ydim1_update_halo_kernel5_minus_2_front;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel5_minus_2_front_gpu(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel5_minus_2_front(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1,
ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 139))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(139, "update_halo_kernel5_minus_2_front");
OPS_kernels[139].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel5_minus_2_front_h ||
ydim0 != ydim0_update_halo_kernel5_minus_2_front_h ||
xdim1 != xdim1_update_halo_kernel5_minus_2_front_h ||
ydim1 != ydim1_update_halo_kernel5_minus_2_front_h) {
cudaMemcpyToSymbol(xdim0_update_halo_kernel5_minus_2_front, &xdim0,
sizeof(int));
xdim0_update_halo_kernel5_minus_2_front_h = xdim0;
cudaMemcpyToSymbol(ydim0_update_halo_kernel5_minus_2_front, &ydim0,
sizeof(int));
ydim0_update_halo_kernel5_minus_2_front_h = ydim0;
cudaMemcpyToSymbol(xdim1_update_halo_kernel5_minus_2_front, &xdim1,
sizeof(int));
xdim1_update_halo_kernel5_minus_2_front_h = xdim1;
cudaMemcpyToSymbol(ydim1_update_halo_kernel5_minus_2_front, &ydim1,
sizeof(int));
ydim1_update_halo_kernel5_minus_2_front_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
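  // Fold the 3-D start index into a flat byte offset: in each dimension,
  // subtract the dat's base origin and halo displacement (d_m) from the start
  // coordinate, then scale by the accumulated x/y strides and the element size.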
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[139].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_update_halo_kernel5_minus_2_front<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[139].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[139].mpi_time += t2 - t1;
OPS_kernels[139].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[139].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
|
9a27a1e4465221342dea89a5a780869accc648b5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <string.h> /* memcpy */
#include <math.h>
#include <stdint.h>
void *cuda_upload_var(void *host_var, int size)
{
void *cuda_var;
hipMalloc(&cuda_var, 4);
hipMemcpy(cuda_var, host_var, size, hipMemcpyHostToDevice);
return cuda_var;
}
void cuda_download_var(void *cuda_var, void *host_var, int size)
{
hipMemcpy(host_var, cuda_var, size, hipMemcpyDeviceToHost);
hipFree(cuda_var);
}
/* Extended C syntax test */
/**********************/
/* BLOCK COMMENT TEST */
/**********************/
int aaaaa; /* Comment explaining 'aaaaa' */ /* See how comments are preserved in the output */
char bbbbbb;
typedef struct Aggregate2
{
int c;
} Aggregate2;
typedef struct Aggregate
{
int a;
int b;
Aggregate2 c;
} Aggregate;
/* These will be in global scope in the output code */
typedef struct LocalType
{
int foo;
} LocalType;
void local_func(int p)
{
LocalType bug_test; /* Just to test that identifier lookup works */
bug_test.foo = 123;
aaaaa = bug_test.foo + p;
}
int main(int argc, const char **argv)
{
int temp_var;
int i;
Aggregate foo1 = {
0,
1,
{ 2 }
};
temp_var = 1 + 2*3;
local_func(10);
{
int test;
test = 5;
aaaaa = test;
}
if (1) {
/* Foo */
for (i = 0; i < 10; i = i + 1) {
temp_var = temp_var + 1;
}
} else if (2) {
for (i = 0; i < 10; i = i + 1)
;
} else {
/* Bar */
if (1) {
i = 2;
}
while (i) {
i = i - 1;
}
}
return foo1.a;
}
|
9a27a1e4465221342dea89a5a780869accc648b5.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <string.h> /* memcpy */
#include <math.h>
#include <stdint.h>
void *cuda_upload_var(void *host_var, int size)
{
void *cuda_var;
cudaMalloc(&cuda_var, 4);
cudaMemcpy(cuda_var, host_var, size, cudaMemcpyHostToDevice);
return cuda_var;
}
void cuda_download_var(void *cuda_var, void *host_var, int size)
{
cudaMemcpy(host_var, cuda_var, size, cudaMemcpyDeviceToHost);
cudaFree(cuda_var);
}
/* Extended C syntax test */
/**********************/
/* BLOCK COMMENT TEST */
/**********************/
int aaaaa; /* Comment explaining 'aaaaa' */ /* See how comments are preserved in the output */
char bbbbbb;
typedef struct Aggregate2
{
int c;
} Aggregate2;
typedef struct Aggregate
{
int a;
int b;
Aggregate2 c;
} Aggregate;
/* These will be in global scope in the output code */
typedef struct LocalType
{
int foo;
} LocalType;
void local_func(int p)
{
LocalType bug_test; /* Just to test that identifier lookup works */
bug_test.foo = 123;
aaaaa = bug_test.foo + p;
}
int main(int argc, const char **argv)
{
int temp_var;
int i;
Aggregate foo1 = {
0,
1,
{ 2 }
};
temp_var = 1 + 2*3;
local_func(10);
{
int test;
test = 5;
aaaaa = test;
}
if (1) {
/* Foo */
for (i = 0; i < 10; i = i + 1) {
temp_var = temp_var + 1;
}
} else if (2) {
for (i = 0; i < 10; i = i + 1)
;
} else {
/* Bar */
if (1) {
i = 2;
}
while (i) {
i = i - 1;
}
}
return foo1.a;
}
|
06abba7587acc50bc59b1c3492845f4b79387de6.hip
|
// !!! This is a file automatically generated by hipify!!!
//####################################################
//# Warp-level prefix sum (scan) via warp shuffles.  #
//####################################################
#pragma once
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <time.h>
#include "common/common_vc.h"
__global__ void warpLevelSum(float* s,float *t){
int id = threadIdx.x + blockIdx.x * blockDim.x;
int sd = threadIdx.x;
int wd = sd%32;
float tmp,swp;
tmp = s[id];
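    // Blelloch-style exclusive scan across one 32-lane warp: the first loop is
    // the up-sweep (reduce) phase, zeroing lane 31 plants the identity at the
    // root, and the second loop is the down-sweep that propagates exclusive
    // prefixes back to every lane. __shfl_xor swaps partials butterfly-style.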
for (int i=0;i<5;i++){
int p2 = 1<<i;
int pp2= p2<<1;
swp = __shfl_xor(tmp,p2);
tmp +=( wd%pp2 == pp2 -1 )? swp : 0;
}
tmp = wd%32 == 31 ? 0 : tmp;
for (int i=4;i>=0;i--){
int p2 = 1<<i;
int pp2= p2<<1;
swp = __shfl_xor(tmp,p2);
tmp = wd%pp2 == pp2 - p2 - 1 ? swp : tmp;
tmp += wd%pp2 == pp2 - 1 ? swp : 0;
}
t[id] = tmp;
}
void initRand(){
srand((unsigned) time(nullptr));
}
void initHBuf(float *buf,int nElem){
for (int i=0;i<nElem;i++){
buf[i]=i+1;
}
}
int main(int argc,char** argv){
initRand();
int nElem = 32;
size_t nByte = nElem * sizeof(float);
float *h_s,*h_t;
float *d_s,*d_t;
h_s = (float*)malloc(nByte);
h_t = (float*)malloc(nByte);
hipMalloc(&d_s,nByte);
hipMalloc(&d_t,nByte);
initHBuf(h_s,nElem);
hipMemcpy(d_s,h_s,nByte,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( warpLevelSum), dim3(1),dim3(nElem), 0, 0, d_s,d_t);
hipMemcpy(h_t,d_t,nByte,hipMemcpyDeviceToHost);
for (int i=0;i<nElem;i++){
printf("%f,%f\n",h_s[i],h_t[i]);
}
free(h_s);
free(h_t);
hipFree(d_s);
hipFree(d_t);
}
|
06abba7587acc50bc59b1c3492845f4b79387de6.cu
|
//####################################################
//# Warp-level prefix sum (scan) via warp shuffles.  #
//####################################################
#pragma once
#include <stdio.h>
#include <cuda_runtime.h>
#include <time.h>
#include "common/common_vc.h"
__global__ void warpLevelSum(float* s,float *t){
int id = threadIdx.x + blockIdx.x * blockDim.x;
int sd = threadIdx.x;
int wd = sd%32;
float tmp,swp;
tmp = s[id];
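    // Blelloch-style exclusive scan across one 32-lane warp: the first loop is
    // the up-sweep (reduce) phase, zeroing lane 31 plants the identity at the
    // root, and the second loop is the down-sweep that propagates exclusive
    // prefixes back to every lane. __shfl_xor swaps partials butterfly-style.
    // (__shfl_xor without the _sync suffix is deprecated since CUDA 9; newer
    // code would use __shfl_xor_sync(0xffffffff, ...). Kept as in the original.)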
for (int i=0;i<5;i++){
int p2 = 1<<i;
int pp2= p2<<1;
swp = __shfl_xor(tmp,p2);
tmp +=( wd%pp2 == pp2 -1 )? swp : 0;
}
tmp = wd%32 == 31 ? 0 : tmp;
for (int i=4;i>=0;i--){
int p2 = 1<<i;
int pp2= p2<<1;
swp = __shfl_xor(tmp,p2);
tmp = wd%pp2 == pp2 - p2 - 1 ? swp : tmp;
tmp += wd%pp2 == pp2 - 1 ? swp : 0;
}
t[id] = tmp;
}
void initRand(){
srand((unsigned) time(nullptr));
}
void initHBuf(float *buf,int nElem){
for (int i=0;i<nElem;i++){
buf[i]=i+1;
}
}
int main(int argc,char** argv){
initRand();
int nElem = 32;
size_t nByte = nElem * sizeof(float);
float *h_s,*h_t;
float *d_s,*d_t;
h_s = (float*)malloc(nByte);
h_t = (float*)malloc(nByte);
cudaMalloc(&d_s,nByte);
cudaMalloc(&d_t,nByte);
initHBuf(h_s,nElem);
cudaMemcpy(d_s,h_s,nByte,cudaMemcpyHostToDevice);
warpLevelSum<<<1,nElem>>>(d_s,d_t);
cudaMemcpy(h_t,d_t,nByte,cudaMemcpyDeviceToHost);
for (int i=0;i<nElem;i++){
printf("%f,%f\n",h_s[i],h_t[i]);
}
free(h_s);
free(h_t);
cudaFree(d_s);
cudaFree(d_t);
}
|
c1c6d87f955e0a8069b2c06fb09ad9666fc8a15b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.h"
#define TX 32
#define TY 32
#define DIM 2048
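// Note: hipify renamed the original cuComplex struct to hipComplex, which can
// collide with the hipComplex type from <hip/hip_complex.h>; kept as generated.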
struct hipComplex {
float r;
float i;
__device__ hipComplex( float a, float b ) : r(a), i(b) {}
__device__ float magnitude2( void ) {
return r * r + i * i;
}
__device__ hipComplex operator*(const hipComplex& a) {
return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ hipComplex operator-(const hipComplex& a) {
return hipComplex(r-a.r, i-a.i);
}
__device__ hipComplex operator+(const hipComplex& a) {
return hipComplex(r+a.r, i+a.i);
}
__device__ hipComplex operator/(const hipComplex& a) {
return hipComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i));
}
};
__device__ hipComplex conj(hipComplex m)
{
hipComplex out(m.r,-m.i);
return out;
}
__device__ hipComplex nor(hipComplex m)
{
hipComplex out(m.r*m.r+m.i*m.i,0.0);
return out;
}
__device__ float norg(hipComplex m)
{
return sqrtf(m.r*m.r+m.i*m.i);
}
__device__ hipComplex qpoch(hipComplex a, hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex unity(1.0,0.0);
int i = 0;
hipComplex Q = q;
if(q.magnitude2()>1.0)
{
return hipComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
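    // Note: Q starts at q, so this is the truncated product of (1 - a*q^i) for
    // i = 1..79, i.e. (aq; q)_79 rather than (a; q)_infinity, which includes
    // the i = 0 factor (1 - a).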
for(i=1;i<80;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ hipComplex qp(hipComplex a, hipComplex q, int n) {
hipComplex out(1.0,0.0);
hipComplex unity(1.0,0.0);
int i = 0;
hipComplex Q = q;
if(q.magnitude2()>1.0)
{
return hipComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
for(i=1;i<n;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ hipComplex ramphi(hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex mq = mone*q;
return qpoch(mq,mq)/qpoch(q,mq);
}
__device__ hipComplex rampsi(hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex mq = mone*q;
return qpoch(mq,q)*qpoch(q*q,q*q);
}
__device__ hipComplex ramchi(hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex mq = mone*q;
return qpoch(mq,q*q);
}
__device__ hipComplex ramf(hipComplex a, hipComplex b) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex ma = mone*a;
hipComplex mb = mone*b;
return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b);
}
// complex exponential
__device__ hipComplex expc(hipComplex m)
{
hipComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i));
return out;
}
__device__ hipComplex powc(hipComplex ag, hipComplex bg)
{
hipComplex out(0.0,0.0);
hipComplex mesp(0.0,0.0);
hipComplex frim(0.0,0.0);
double radiu, thet;
/* get the proper polar form of the complex number */
radiu = sqrtf(ag.r*ag.r + ag.i*ag.i);
thet = atan2f(ag.i,ag.r);
/* mesp gives R^(c+di) */
mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu));
mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu));
/* frim gives e^(i theta (c+di)) */
/* now since we already have the machinery
for performing complex exponentiation (just exp), we
can just call that here */
frim.r = -1.0 * bg.i * thet;
frim.i = bg.r * thet;
frim = expc(frim);
out = mesp*frim;
return out;
}
// cosine (nothing algorithmically clean)
__device__ hipComplex cosc(hipComplex m)
{
hipComplex ai(0.0,1.0);
hipComplex ot(0.5,0.0);
hipComplex mone(-1.0,0.0);
hipComplex out = ot*(expc(m*ai) + expc(mone*m*ai));
return out;
}
__device__ hipComplex sins(hipComplex m)
{
hipComplex ai(0.0,1.0);
hipComplex ot(0.0,0.5);
hipComplex mone(-1.0,0.0);
hipComplex out = ot*(expc(m*ai) - expc(mone*m*ai));
return out;
}
__device__ hipComplex tans(hipComplex m)
{
return sins(m)/cosc(m);
}
__device__ hipComplex moeb(hipComplex t, hipComplex a, hipComplex z)
{
hipComplex out(0.0,0.0);
hipComplex ai(0.0,1.0);
hipComplex unity(1.0,0.0);
out = expc(ai*t) * (z-a)/(unity-conj(a)*z);
return out;
}
__device__ hipComplex bnewt(hipComplex z) {
hipComplex three(3.0,0.0);
hipComplex unity(1.0,0.0);
hipComplex out(0.0,0.0);
hipComplex Z =z;
hipComplex L(0.0,0.0);
hipComplex R(0.62348980185873359,0.7818314824680298);
hipComplex v(0.62348980185873359,0.7818314824680298);
int i;
for(i=0;i<100;i++)
{
L = sins(expc(Z)-cosc(Z))-Z;
out = out + v*L;
v = R * v;
Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity);
}
return out;
}
__device__ hipComplex they3(hipComplex z, hipComplex q)
{
int u;
hipComplex out(0.0,0.0);
hipComplex enn(-20.0,0.0);
hipComplex onn(1.0,0.0);
hipComplex dui(0.0,1.0);
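    // Truncated Jacobi theta_3 series: sum over n in [-20,20) of q^(n^2) * exp(i*n*z).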
for(u=-20;u<20;u++)
{
out = out + powc(q,enn*enn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ hipComplex they3p(hipComplex z, hipComplex q)
{
int u;
hipComplex out(0.0,0.0);
hipComplex enn(-20.0,0.0);
hipComplex onn(1.0,0.0);
hipComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ hipComplex h3ey3p(hipComplex z, hipComplex q)
{
int u;
hipComplex out(0.0,0.0);
hipComplex aut(0.0,0.0);
hipComplex enn(-20.0,0.0);
hipComplex onn(1.0,0.0);
hipComplex dui(0.0,1.0);
hipComplex vel(0.0,0.0);
hipComplex rav(0.0,0.0);
for(u=-40;u<40;u++)
{
vel = expc(dui*enn*z);
rav = powc(q,enn*enn);
aut = aut + (enn*enn)*rav/q*vel;
out = out + rav*vel;
enn = enn + onn;
}
return out/aut;
}
__device__ hipComplex thess(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex thess4(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex thesk(hipComplex z, hipComplex q, hipComplex r)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
hipComplex roo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
roo = roo * r * r ;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r));
}
return out;
}
__device__ hipComplex thass(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex rogers( hipComplex q)
{
hipComplex onf(0.2,0.0);
hipComplex Q5 = q*q*q*q*q;
hipComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5));
return out;
}
__device__ hipComplex flat(hipComplex m)
{
float ua = sqrtf(m.r*m.r + m.i*m.i);
hipComplex out(m.r/ua,m.i/ua);
return out;
}
__device__ hipComplex eff(hipComplex z, hipComplex lambda)
{
return z*z*z*z+ lambda/(z*z*z*z);
}
__device__
unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); }
__global__
void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) {
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r= blockIdx.y*blockDim.y + threadIdx.y;
const int i = c + r*w; // 1D indexing
float pi = 3.1415926535898;
const float scale = 2.0;
float fx = -scale * (float)(DIM/2 - c)/(DIM/2);
float fy = scale * (float)(DIM/2 - r)/(DIM/2);
float LA = -scale * (float)(DIM/2 - pos.x)/(DIM/2);
float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2);
hipComplex mouse(LA,LB);
hipComplex moux(LA,0);
hipComplex mouy(0,LB);
hipComplex q(fx,fy);
/* hipComplex tik(sin(ticks/40.0f),0.0);*/
/* hipComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0));
hipComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024));
hipComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/
hipComplex fixon(.029348,.828934);
hipComplex faxon(.029348,-.828934);
hipComplex unity(1.0,0.0);
hipComplex ai(0.0,1.0);
hipComplex flurn(0.0,0.0);
hipComplex accume(0.0,0.0);
hipComplex eccume(0.0,0.0);
hipComplex rhun(1.02871376821872462237195122725097462534904479,0.0);
hipComplex cue = q;
hipComplex lam(0.73736887807831963, -0.67549029426152396);
hipComplex due(3.0,0.0);
hipComplex tir(2.0,0.0);
hipComplex selga(3.0,0.0);
hipComplex vro(-1.0,0.0);
hipComplex tle(1.0,0.0);
hipComplex sle(4.0,0.0);
hipComplex cherra(0.62348980185873359, 0.7818314824680298);
hipComplex lerra = cherra*cherra;
hipComplex ferra = lerra * cherra;
hipComplex terra = ferra * cherra;
hipComplex zerra = terra * cherra;
hipComplex nerra = zerra * cherra;
hipComplex sugna(0.70710678118654757, 0.70710678118654746);
hipComplex regna(0.99966573338968745, 0.025853848581176047);
hipComplex spa(sqrtf(2.0),0.0);
hipComplex spb(sqrtf(3.0),0.0);
hipComplex spc(sqrtf(4.0),0.0);
hipComplex spd(sqrtf(5.0),0.0);
hipComplex mrun(1/2.0,0.0);
hipComplex plenod(-.01,0.0);
/* if ((c >= w) || (r >= h)) return; // Check if within image bounds
const int i = c + r*w; // 1D indexing
const int dist = sqrtf((c - pos.x)*(c - pos.x) +
(r - pos.y)*(r - pos.y));
const unsigned char intensity = clip(255 - dist);*/
// theta function varying on constant
// cue =thess(cue,fixon*mouse);
int v=1;
int LNA;
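    // Newton iteration on f(z) = cos(sin z) - sin(cos z) - mouse*z; the
    // denominator below is exactly f'(z). 'accume' accumulates the iterates
    // with geometric weight mouy, and the final color encodes arg(accume).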
for(v=0;v<10;v++)
{
cue = cue - (cosc(sins(cue))-sins(cosc(cue))-mouse*cue)/(sins(cue)*cosc(cosc(cue))-cosc(cue)*sins(sins(cue))-mouse);
accume = cue + mouy *accume;
}
cue = accume;
double tha;
tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi));
d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2));
d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2));
d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2));
d_out[i].w = 255;
}
void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) {
const dim3 blockSize(TX, TY);
const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY);
hipLaunchKernelGGL(( distanceKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_out, w, h, pos);
}
|
c1c6d87f955e0a8069b2c06fb09ad9666fc8a15b.cu
|
#include "kernel.h"
#define TX 32
#define TY 32
#define DIM 2048
struct cuComplex {
float r;
float i;
__device__ cuComplex( float a, float b ) : r(a), i(b) {}
__device__ float magnitude2( void ) {
return r * r + i * i;
}
__device__ cuComplex operator*(const cuComplex& a) {
return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ cuComplex operator-(const cuComplex& a) {
return cuComplex(r-a.r, i-a.i);
}
__device__ cuComplex operator+(const cuComplex& a) {
return cuComplex(r+a.r, i+a.i);
}
__device__ cuComplex operator/(const cuComplex& a) {
return cuComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i));
}
};
__device__ cuComplex conj(cuComplex m)
{
cuComplex out(m.r,-m.i);
return out;
}
__device__ cuComplex nor(cuComplex m)
{
cuComplex out(m.r*m.r+m.i*m.i,0.0);
return out;
}
__device__ float norg(cuComplex m)
{
return sqrtf(m.r*m.r+m.i*m.i);
}
__device__ cuComplex qpoch(cuComplex a, cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex unity(1.0,0.0);
int i = 0;
cuComplex Q = q;
if(q.magnitude2()>1.0)
{
return cuComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
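    // Note: Q starts at q, so this is the truncated product of (1 - a*q^i) for
    // i = 1..79, i.e. (aq; q)_79 rather than (a; q)_infinity, which includes
    // the i = 0 factor (1 - a).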
for(i=1;i<80;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ cuComplex qp(cuComplex a, cuComplex q, int n) {
cuComplex out(1.0,0.0);
cuComplex unity(1.0,0.0);
int i = 0;
cuComplex Q = q;
if(q.magnitude2()>1.0)
{
return cuComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
for(i=1;i<n;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ cuComplex ramphi(cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex mq = mone*q;
return qpoch(mq,mq)/qpoch(q,mq);
}
__device__ cuComplex rampsi(cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex mq = mone*q;
return qpoch(mq,q)*qpoch(q*q,q*q);
}
__device__ cuComplex ramchi(cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex mq = mone*q;
return qpoch(mq,q*q);
}
__device__ cuComplex ramf(cuComplex a, cuComplex b) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex ma = mone*a;
cuComplex mb = mone*b;
return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b);
}
// complex exponential
__device__ cuComplex expc(cuComplex m)
{
cuComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i));
return out;
}
__device__ cuComplex powc(cuComplex ag, cuComplex bg)
{
cuComplex out(0.0,0.0);
cuComplex mesp(0.0,0.0);
cuComplex frim(0.0,0.0);
double radiu, thet;
/* get the proper polar form of the complex number */
radiu = sqrtf(ag.r*ag.r + ag.i*ag.i);
thet = atan2f(ag.i,ag.r);
/* mesp gives R^(c+di) */
mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu));
mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu));
/* frim gives e^(i theta (c+di)) */
/* now since we already have the machinery
for performing complex exponentiation (just exp), we
can just call that here */
frim.r = -1.0 * bg.i * thet;
frim.i = bg.r * thet;
frim = expc(frim);
out = mesp*frim;
return out;
}
// cosine (nothing algorithmically clean)
__device__ cuComplex cosc(cuComplex m)
{
cuComplex ai(0.0,1.0);
cuComplex ot(0.5,0.0);
cuComplex mone(-1.0,0.0);
cuComplex out = ot*(expc(m*ai) + expc(mone*m*ai));
return out;
}
__device__ cuComplex sins(cuComplex m)
{
cuComplex ai(0.0,1.0);
cuComplex ot(0.0,0.5);
cuComplex mone(-1.0,0.0);
cuComplex out = ot*(expc(m*ai) - expc(mone*m*ai));
return out;
}
__device__ cuComplex tans(cuComplex m)
{
return sins(m)/cosc(m);
}
__device__ cuComplex moeb(cuComplex t, cuComplex a, cuComplex z)
{
cuComplex out(0.0,0.0);
cuComplex ai(0.0,1.0);
cuComplex unity(1.0,0.0);
out = expc(ai*t) * (z-a)/(unity-conj(a)*z);
return out;
}
__device__ cuComplex bnewt(cuComplex z) {
cuComplex three(3.0,0.0);
cuComplex unity(1.0,0.0);
cuComplex out(0.0,0.0);
cuComplex Z =z;
cuComplex L(0.0,0.0);
cuComplex R(0.62348980185873359,0.7818314824680298);
cuComplex v(0.62348980185873359,0.7818314824680298);
int i;
for(i=0;i<100;i++)
{
L = sins(expc(Z)-cosc(Z))-Z;
out = out + v*L;
v = R * v;
Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity);
}
return out;
}
__device__ cuComplex they3(cuComplex z, cuComplex q)
{
int u;
cuComplex out(0.0,0.0);
cuComplex enn(-20.0,0.0);
cuComplex onn(1.0,0.0);
cuComplex dui(0.0,1.0);
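    // Truncated Jacobi theta_3 series: sum over n in [-20,20) of q^(n^2) * exp(i*n*z).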
for(u=-20;u<20;u++)
{
out = out + powc(q,enn*enn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ cuComplex they3p(cuComplex z, cuComplex q)
{
int u;
cuComplex out(0.0,0.0);
cuComplex enn(-20.0,0.0);
cuComplex onn(1.0,0.0);
cuComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ cuComplex h3ey3p(cuComplex z, cuComplex q)
{
int u;
cuComplex out(0.0,0.0);
cuComplex aut(0.0,0.0);
cuComplex enn(-20.0,0.0);
cuComplex onn(1.0,0.0);
cuComplex dui(0.0,1.0);
cuComplex vel(0.0,0.0);
cuComplex rav(0.0,0.0);
for(u=-40;u<40;u++)
{
vel = expc(dui*enn*z);
rav = powc(q,enn*enn);
aut = aut + (enn*enn)*rav/q*vel;
out = out + rav*vel;
enn = enn + onn;
}
return out/aut;
}
__device__ cuComplex thess(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex thess4(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex thesk(cuComplex z, cuComplex q, cuComplex r)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
cuComplex roo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
roo = roo * r * r ;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r));
}
return out;
}
__device__ cuComplex thass(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex rogers( cuComplex q)
{
cuComplex onf(0.2,0.0);
cuComplex Q5 = q*q*q*q*q;
cuComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5));
return out;
}
__device__ cuComplex flat(cuComplex m)
{
float ua = sqrtf(m.r*m.r + m.i*m.i);
cuComplex out(m.r/ua,m.i/ua);
return out;
}
__device__ cuComplex eff(cuComplex z, cuComplex lambda)
{
return z*z*z*z+ lambda/(z*z*z*z);
}
__device__
unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); }
__global__
void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) {
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r= blockIdx.y*blockDim.y + threadIdx.y;
const int i = c + r*w; // 1D indexing
float pi = 3.1415926535898;
const float scale = 2.0;
float fx = -scale * (float)(DIM/2 - c)/(DIM/2);
float fy = scale * (float)(DIM/2 - r)/(DIM/2);
float LA = -scale * (float)(DIM/2 - pos.x)/(DIM/2);
float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2);
cuComplex mouse(LA,LB);
cuComplex moux(LA,0);
cuComplex mouy(0,LB);
cuComplex q(fx,fy);
/* cuComplex tik(sin(ticks/40.0f),0.0);*/
/* cuComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0));
cuComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024));
cuComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/
cuComplex fixon(.029348,.828934);
cuComplex faxon(.029348,-.828934);
cuComplex unity(1.0,0.0);
cuComplex ai(0.0,1.0);
cuComplex flurn(0.0,0.0);
cuComplex accume(0.0,0.0);
cuComplex eccume(0.0,0.0);
cuComplex rhun(1.02871376821872462237195122725097462534904479,0.0);
cuComplex cue = q;
cuComplex lam(0.73736887807831963, -0.67549029426152396);
cuComplex due(3.0,0.0);
cuComplex tir(2.0,0.0);
cuComplex selga(3.0,0.0);
cuComplex vro(-1.0,0.0);
cuComplex tle(1.0,0.0);
cuComplex sle(4.0,0.0);
cuComplex cherra(0.62348980185873359, 0.7818314824680298);
cuComplex lerra = cherra*cherra;
cuComplex ferra = lerra * cherra;
cuComplex terra = ferra * cherra;
cuComplex zerra = terra * cherra;
cuComplex nerra = zerra * cherra;
cuComplex sugna(0.70710678118654757, 0.70710678118654746);
cuComplex regna(0.99966573338968745, 0.025853848581176047);
cuComplex spa(sqrtf(2.0),0.0);
cuComplex spb(sqrtf(3.0),0.0);
cuComplex spc(sqrtf(4.0),0.0);
cuComplex spd(sqrtf(5.0),0.0);
cuComplex mrun(1/2.0,0.0);
cuComplex plenod(-.01,0.0);
/* if ((c >= w) || (r >= h)) return; // Check if within image bounds
const int i = c + r*w; // 1D indexing
const int dist = sqrtf((c - pos.x)*(c - pos.x) +
(r - pos.y)*(r - pos.y));
const unsigned char intensity = clip(255 - dist);*/
// theta function varying on constant
// cue =thess(cue,fixon*mouse);
int v=1;
int LNA;
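    // Newton iteration on f(z) = cos(sin z) - sin(cos z) - mouse*z; the
    // denominator below is exactly f'(z). 'accume' accumulates the iterates
    // with geometric weight mouy, and the final color encodes arg(accume).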
for(v=0;v<10;v++)
{
cue = cue - (cosc(sins(cue))-sins(cosc(cue))-mouse*cue)/(sins(cue)*cosc(cosc(cue))-cosc(cue)*sins(sins(cue))-mouse);
accume = cue + mouy *accume;
}
cue = accume;
double tha;
tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi));
d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2));
d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2));
d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2));
d_out[i].w = 255;
}
void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) {
const dim3 blockSize(TX, TY);
const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY);
distanceKernel<<<gridSize, blockSize>>>(d_out, w, h, pos);
}
|
cf37c666d68552702accc8352fbcbb36c47146b8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Slice.h"
#include "Device.h"
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
extern __global__ void slice(int n, float* ptrTabGM);
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
Slice::Slice(int n, const Grid& grid)
{
this->n = n;
this->grid = grid;
this->sizeTab = n * sizeof(float);
this->ptrTab = new float[n];
this->pi = 0.0;
// MM
Device::malloc(&ptrTabGM, sizeTab);
}
Slice::~Slice()
{
Device::free(ptrTabGM);
delete[] ptrTab;
}
float Slice::getPi()
{
return this->pi;
}
void Slice::run()
{
hipLaunchKernelGGL(( slice), dim3(grid.dg), dim3(grid.db), 0, 0, n, ptrTabGM);
Device::memcpyDToH(ptrTab, ptrTabGM, sizeTab);
int i = 0;
float sum = 0.0;
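	// Averaging the per-slice values approximates pi; presumably each slice i
	// holds 4/(1+x_i^2) sampled at the slice midpoint, making the average a
	// midpoint-rule quadrature of the integral of 4/(1+x^2) over [0,1]. This is
	// an assumption: the kernel body lives in a separate translation unit.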
#pragma omp parallel for reduction(+:sum)
for (i = 0; i < n; i++)
{
sum += ptrTab[i];
}
pi = sum / (float) n;
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
cf37c666d68552702accc8352fbcbb36c47146b8.cu
|
#include "Slice.h"
#include "Device.h"
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
extern __global__ void slice(int n, float* ptrTabGM);
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
Slice::Slice(int n, const Grid& grid)
{
this->n = n;
this->grid = grid;
this->sizeTab = n * sizeof(float);
this->ptrTab = new float[n];
this->pi = 0.0;
// MM
Device::malloc(&ptrTabGM, sizeTab);
}
Slice::~Slice()
{
Device::free(ptrTabGM);
delete[] ptrTab;
}
float Slice::getPi()
{
return this->pi;
}
void Slice::run()
{
slice<<<grid.dg, grid.db>>>(n, ptrTabGM);
Device::memcpyDToH(ptrTab, ptrTabGM, sizeTab);
int i = 0;
float sum = 0.0;
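	// Averaging the per-slice values approximates pi; presumably each slice i
	// holds 4/(1+x_i^2) sampled at the slice midpoint, making the average a
	// midpoint-rule quadrature of the integral of 4/(1+x^2) over [0,1]. This is
	// an assumption: the kernel body lives in a separate translation unit.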
#pragma omp parallel for reduction(+:sum)
for (i = 0; i < n; i++)
{
sum += ptrTab[i];
}
pi = sum / (float) n;
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
223d4ff318a487e3ea76392a629886de755bd986.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from sparse/blas/magma_zthrsrm.cu, normal z -> s, Wed Jan 2 14:18:53 2019
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#include <hip/hip_runtime.h>
#define SWAP(a, b) { tmp = a; a = b; b = tmp; }
#define BLOCK_SIZE 128
__global__ void
zcompute_newval_kernel(
magma_int_t num_rows,
magma_index_t* Arow,
magma_index_t* Brow,
magma_index_t* Acol,
magma_index_t* Browidx,
magma_index_t* Bcol,
float* Aval,
float* Bval)
{
int tidx = blockIdx.x*blockDim.x+threadIdx.x;
magma_index_t offset_new, offset_old, end_old;
if (tidx < num_rows) {
magma_int_t count = 0;
offset_old = Arow[tidx];
offset_new = Brow[tidx];
end_old = Arow[tidx+1];
for (int i = offset_old; i < end_old; i++) {
if(Acol[i]>-1){
Bcol[offset_new+count] = Acol[i];
Bval[offset_new+count] = Aval[i];
Browidx[offset_new + count] = tidx;
count++;
}
}
}
}
//kernel
__global__ void
zcompute_nnz_kernel(
magma_int_t num_rows,
magma_index_t* Arow,
magma_index_t* Brow,
magma_index_t* Acol,
float* Aval,
float thrs)
{
int row= blockIdx.x*blockDim.x+threadIdx.x;
if (row < num_rows) {
magma_int_t rm = 0;
magma_int_t el = 0;
for (int i = Arow[row]; i<Arow[row+1]; i++) {
if (MAGMA_S_ABS(Aval[i]) <= thrs ) {
if (Acol[i] != row) {
Acol[i] = -1;//cheaperthanval
rm++;
} else {
el++;
}
} else {
el++;
}
}
Brow[row] = el;
}
}
/**
Purpose
-------
    This routine removes every off-diagonal element whose magnitude does not
    exceed the given threshold and compacts the surviving entries into a new
    CSR structure that replaces A.
Arguments
---------
@param[in]
order magma_int_t
dummy variable for now.
@param[in,out]
A magma_s_matrix*
input/output matrix where elements are removed
    @param[in]
    thrs        float*
                threshold below which off-diagonal elements are removed
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_saux
********************************************************************/
extern "C" magma_int_t
magma_sthrsholdrm_gpu(
magma_int_t order,
magma_s_matrix* A,
float* thrs,
magma_queue_t queue)
{
magma_int_t info = 0;
magma_int_t num_blocks = magma_ceildiv(A->num_rows,BLOCK_SIZE);
magma_s_matrix B={Magma_CSR};
B.num_rows = A->num_rows;
B.num_cols = A->num_cols;
B.storage_type = A->storage_type;
B.memory_location = Magma_DEV;
magma_index_t *new_rownnz={NULL};
dim3 block(BLOCK_SIZE, 1, 1);
dim3 grid(num_blocks, 1, 1 );
magma_index_malloc(&new_rownnz,A->num_rows);
magma_index_malloc(&B.drow,A->num_rows+1);
hipLaunchKernelGGL(( zcompute_nnz_kernel), dim3(grid), dim3(block), 0, queue->cuda_stream(),
A->num_rows, A->drow, new_rownnz, A->dcol, A->dval,*thrs);
magma_sget_row_ptr(A->num_rows, &B.nnz, new_rownnz, B.drow, queue);
magma_smalloc(&B.dval,B.nnz);
magma_index_malloc(&B.rowidx,B.nnz);
magma_index_malloc(&B.dcol,B.nnz);
hipLaunchKernelGGL(( zcompute_newval_kernel), dim3(grid), dim3(block), 0, queue->cuda_stream(),
A->num_rows, A->drow, B.drow, A->dcol,B.drowidx, B.dcol, A->dval, B.dval);
//Rewrite the matrix with all the new values
magma_smatrix_swap(&B, A, queue);
magma_smfree(&B, queue);
magma_free(new_rownnz);
return info;
}
|
223d4ff318a487e3ea76392a629886de755bd986.cu
|
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from sparse/blas/magma_zthrsrm.cu, normal z -> s, Wed Jan 2 14:18:53 2019
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#include <cuda_runtime.h>
#define SWAP(a, b) { tmp = a; a = b; b = tmp; }
#define BLOCK_SIZE 128
__global__ void
zcompute_newval_kernel(
magma_int_t num_rows,
magma_index_t* Arow,
magma_index_t* Brow,
magma_index_t* Acol,
magma_index_t* Browidx,
magma_index_t* Bcol,
float* Aval,
float* Bval)
{
int tidx = blockIdx.x*blockDim.x+threadIdx.x;
magma_index_t offset_new, offset_old, end_old;
if (tidx < num_rows) {
magma_int_t count = 0;
offset_old = Arow[tidx];
offset_new = Brow[tidx];
end_old = Arow[tidx+1];
for (int i = offset_old; i < end_old; i++) {
if(Acol[i]>-1){
Bcol[offset_new+count] = Acol[i];
Bval[offset_new+count] = Aval[i];
Browidx[offset_new + count] = tidx;
count++;
}
}
}
}
//kernel
__global__ void
zcompute_nnz_kernel(
magma_int_t num_rows,
magma_index_t* Arow,
magma_index_t* Brow,
magma_index_t* Acol,
float* Aval,
float thrs)
{
int row= blockIdx.x*blockDim.x+threadIdx.x;
if (row < num_rows) {
magma_int_t rm = 0;
magma_int_t el = 0;
for (int i = Arow[row]; i<Arow[row+1]; i++) {
if (MAGMA_S_ABS(Aval[i]) <= thrs ) {
if (Acol[i] != row) {
Acol[i] = -1;//cheaperthanval
rm++;
} else {
el++;
}
} else {
el++;
}
}
Brow[row] = el;
}
}
/**
Purpose
-------
    This routine removes every off-diagonal element whose magnitude does not
    exceed the given threshold and compacts the surviving entries into a new
    CSR structure that replaces A.
Arguments
---------
@param[in]
order magma_int_t
dummy variable for now.
@param[in,out]
A magma_s_matrix*
input/output matrix where elements are removed
    @param[in]
    thrs        float*
                threshold below which off-diagonal elements are removed
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_saux
********************************************************************/
extern "C" magma_int_t
magma_sthrsholdrm_gpu(
magma_int_t order,
magma_s_matrix* A,
float* thrs,
magma_queue_t queue)
{
magma_int_t info = 0;
magma_int_t num_blocks = magma_ceildiv(A->num_rows,BLOCK_SIZE);
magma_s_matrix B={Magma_CSR};
B.num_rows = A->num_rows;
B.num_cols = A->num_cols;
B.storage_type = A->storage_type;
B.memory_location = Magma_DEV;
magma_index_t *new_rownnz={NULL};
dim3 block(BLOCK_SIZE, 1, 1);
dim3 grid(num_blocks, 1, 1 );
magma_index_malloc(&new_rownnz,A->num_rows);
magma_index_malloc(&B.drow,A->num_rows+1);
zcompute_nnz_kernel<<<grid, block, 0, queue->cuda_stream()>>>
(A->num_rows, A->drow, new_rownnz, A->dcol, A->dval,*thrs);
magma_sget_row_ptr(A->num_rows, &B.nnz, new_rownnz, B.drow, queue);
magma_smalloc(&B.dval,B.nnz);
magma_index_malloc(&B.rowidx,B.nnz);
magma_index_malloc(&B.dcol,B.nnz);
zcompute_newval_kernel<<<grid, block, 0, queue->cuda_stream()>>>
(A->num_rows, A->drow, B.drow, A->dcol,B.drowidx, B.dcol, A->dval, B.dval);
//Rewrite the matrix with all the new values
magma_smatrix_swap(&B, A, queue);
magma_smfree(&B, queue);
magma_free(new_rownnz);
return info;
}
|
7fe7aa7547fcebe5ebe21a27e07e55cccd0fec4e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <MatKernelD.hpp>
#include <thrust/sort.h>
//#include <hipcub/hipcub.hpp>
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
#else
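// Devices older than sm_60 lack native double-precision atomicAdd; emulate it
// with a compare-and-swap loop over the 64-bit integer view of the operand.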
__device__ double atomicAdd(double* address, double val)
{
unsigned long long int* address_as_ull =
(unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
#if __CUDA_ARCH__ > 200
#define MAXXGRID 2147483647
#else
#define MAXXGRID 65535
#endif
int getDeviceVersionD() {
int igpu;
hipGetDevice(&igpu);
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, igpu);
return 100 * prop.major + 10 * prop.minor;
}
void setsizesD(long long N, dim3 *gridp, int *nthreadsp) {
int nblocks = 1;
int nthreads = 32;
int threads_per_block = 1024;
// int version;
// version = getDeviceVersionD();
// if (version == 320) threads_per_block = 512;
while (1L * nblocks * nthreads < N) {
if (nblocks < 16) {
nblocks = 2*nblocks;
} else if (nthreads < threads_per_block) {
nthreads = 2*nthreads;
} else {
nblocks = 2*nblocks;
}
}
gridp->y = 1 + (nblocks-1)/65536;
gridp->x = 1 + (nblocks-1)/gridp->y;
gridp->z = 1;
*nthreadsp = nthreads;
}
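// Like setsizesD, but once nthreads saturates at threads_per_block the block
// count jumps straight to ceil(N/nthreads) instead of doubling, so the grid
// never overshoots N by up to 2x.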
void setsizesLeanD(long long N, dim3 *gridp, int *nthreadsp) {
int nblocks = 1;
int nthreads = 32;
int threads_per_block = 1024;
// int version;
// version = getDeviceVersionD();
// if (version == 320) threads_per_block = 512;
while (1L * nblocks * nthreads < N) {
if (nblocks < 16) {
nblocks = 2*nblocks;
} else if (nthreads < threads_per_block) {
nthreads = 2*nthreads;
} else {
nblocks = max(nblocks, 1 + (int)((N-1)/nthreads));
}
}
gridp->y = 1 + (nblocks-1)/65536;
gridp->x = 1 + (nblocks-1)/gridp->y;
gridp->z = 1;
*nthreadsp = nthreads;
}
template <class T>
__global__ void __toDouble(T *A, double *B, int N) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) {
B[i] = (double)(A[i]);
}
}
__global__ void __toInt(double *A, int *B, int N) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) {
B[i] = (int)(A[i]);
}
}
int IntToDouble(int *A, double *B, int N) {
int nthreads;
dim3 griddims;
setsizesLeanD(N, &griddims, &nthreads);
hipLaunchKernelGGL(( __toDouble<int>), dim3(griddims),dim3(nthreads), 0, 0, A, B, N);
hipStreamSynchronize(SYNC_STREAM);
hipError_t err = hipGetLastError();
return err;
}
int FloatToDouble(float *A, double *B, int N) {
int nthreads;
dim3 griddims;
setsizesLeanD(N, &griddims, &nthreads);
hipLaunchKernelGGL(( __toDouble<float>), dim3(griddims),dim3(nthreads), 0, 0, A, B, N);
hipStreamSynchronize(SYNC_STREAM);
hipError_t err = hipGetLastError();
return err;
}
int toInt(double *A, int *B, int N) {
int nthreads;
dim3 griddims;
setsizesLeanD(N, &griddims, &nthreads);
hipLaunchKernelGGL(( __toInt), dim3(griddims),dim3(nthreads), 0, 0, A, B, N);
hipStreamSynchronize(SYNC_STREAM);
hipError_t err = hipGetLastError();
return err;
}
__global__ void __full(int *ir, int *ic, double *data, double *od, int nrows, int ncols, int nnz) {
int i, row, col;
double v;
int id = threadIdx.x + blockIdx.x * blockDim.x;
for (i = id; i < nnz; i += blockDim.x * gridDim.x) {
v = data[i];
row = ir[i];
col = ic[i];
od[row + col * nrows] = v;
}
}
int full(int *ir, int *ic, double *data, double *od, int nrows, int ncols, int nnz) {
int nblocks = min(32, 1+(nnz-1)/32);
int nthreads = max(32, min(1+(nnz-1)/nblocks, 1024));
hipLaunchKernelGGL(( __full), dim3(nblocks),dim3(nthreads), 0, 0, ir, ic, data, od, nrows, ncols, nnz);
hipStreamSynchronize(SYNC_STREAM);
hipError_t err = hipGetLastError();
return err;
}
__global__ void __set_val(double *A, double val, int length) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < length; i += blockDim.x * gridDim.x * gridDim.y) {
A[i] = val;
}
}
int set_val(double *A, double val, int length) {
int nthreads;
dim3 griddims;
setsizesLeanD(length, &griddims, &nthreads);
hipLaunchKernelGGL(( __set_val), dim3(griddims),dim3(nthreads), 0, 0, A, val, length);
hipStreamSynchronize(SYNC_STREAM);
hipError_t err = hipGetLastError();
return err;
}
int set_ival(double *A, int val, int length) {
int nthreads;
dim3 griddims;
setsizesLeanD(length, &griddims, &nthreads);
  double dval = 0.0;
  *((int *)&dval) = val;  // plant the int bit pattern; upper bytes stay zero
                          // (the original read 8 bytes from a 4-byte int)
  hipLaunchKernelGGL(( __set_val), dim3(griddims),dim3(nthreads), 0, 0, A, dval, length);
hipStreamSynchronize(SYNC_STREAM);
hipError_t err = hipGetLastError();
return err;
}
__global__ void __copyToInds(double *A, double *B, int *I, long long len) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
int step = blockDim.x * gridDim.x * gridDim.y;
long long i;
for (i = tid; i < len; i += step) {
B[I[i]] = A[i];
}
}
int copyToInds(double *A, double *B, int *I, long long len) {
int nthreads;
dim3 griddims;
setsizesLeanD(len, &griddims, &nthreads);
hipLaunchKernelGGL(( __copyToInds), dim3(griddims),dim3(nthreads), 0, 0, A, B, I, len);
hipStreamSynchronize(SYNC_STREAM);
hipError_t err = hipGetLastError();
return err;
}
template<typename T>
__global__ void __copyFromInds(T *A, T *B, int *I, long long len) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
int step = blockDim.x * gridDim.x * gridDim.y;
long long i;
for (i = tid; i < len; i += step) {
B[i] = A[I[i]];
}
}
int copyFromInds(double *A, double *B, int *I, long long len) {
int nthreads;
dim3 griddims;
setsizesLeanD(len, &griddims, &nthreads);
hipLaunchKernelGGL(( __copyFromInds), dim3(griddims),dim3(nthreads), 0, 0, A, B, I, len);
hipStreamSynchronize(SYNC_STREAM);
hipError_t err = hipGetLastError();
return err;
}
// Implement B[I,J] = A
// indexed copy: version with one block per column
#define COPYTOINDS2DA(DFNAME,IEXPR,JEXPR) \
__global__ void __copyToInds2D##DFNAME(double *A, int lda, double *B, int ldb, int *I, int nrows, int *J, int ncols) { \
int iblock = blockIdx.x + blockIdx.y * gridDim.x; \
if (iblock < ncols) { \
int icol = JEXPR; \
for (int i = threadIdx.x; i < nrows; i += blockDim.x) { \
B[IEXPR + icol * ldb] = A[i + iblock * lda]; \
} \
} \
}
COPYTOINDS2DA(nn,I[i],J[iblock])
COPYTOINDS2DA(xn,i,J[iblock])
COPYTOINDS2DA(nx,I[i],iblock)
COPYTOINDS2DA(xx,i,iblock)
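// Suffix convention for the stamped variants: first letter = row axis, second
// letter = column axis; 'n' means the index array is used, 'x' means identity
// mapping (the corresponding array is NULL).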
// Implement B[I,J] = A
// indexed copy: version with one thread per element
#define COPYTOINDS2DB(DFNAME,IEXPR,JEXPR) \
__global__ void __copyToInds2DB##DFNAME(double *A, int lda, double *B, int ldb, int *I, int nrows, int *J, int ncols) { \
int indx = threadIdx.x + blockDim.x * (blockIdx.x + blockIdx.y * gridDim.x); \
if (indx < nrows * ncols) { \
int irow = indx % nrows; \
int icol = indx / nrows; \
B[IEXPR + JEXPR * ldb] = A[irow + icol * lda]; \
} \
}
COPYTOINDS2DB(nn,I[irow],J[icol])
COPYTOINDS2DB(xn,irow,J[icol])
COPYTOINDS2DB(nx,I[irow],icol)
COPYTOINDS2DB(xx,irow,icol)
// Implement B[I,J] = A
int copyToInds2D(double *A, int lda, double *B, int ldb, int *I, int nrows, int *J, int ncols) {
int len = nrows * ncols;
int nthreads = max(32, min(1024, nrows));
int nblocks = min(ncols, (len-1)/nthreads + 1);
dim3 griddims;
griddims.x = 1;
griddims.y = 1;
griddims.z = 1;
if (nblocks < 65536) {
griddims.x = nblocks;
} else {
int vs = (int)sqrt((double)nblocks);
griddims.x = vs;
griddims.y = (nblocks-1)/vs + 1;
}
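  // If every column got its own block, use the per-column (A) kernels;
  // otherwise fall back to the one-thread-per-element (B) variants.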
if (nblocks == ncols) {
if (I == NULL) {
if (J == NULL) {
hipLaunchKernelGGL(( __copyToInds2Dxx), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
} else {
hipLaunchKernelGGL(( __copyToInds2Dxn), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
}
} else {
if (J == NULL) {
hipLaunchKernelGGL(( __copyToInds2Dnx), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
} else {
hipLaunchKernelGGL(( __copyToInds2Dnn), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
}
}
} else {
if (I == NULL) {
if (J == NULL) {
hipLaunchKernelGGL(( __copyToInds2DBxx), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
} else {
hipLaunchKernelGGL(( __copyToInds2DBxn), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
}
} else {
if (J == NULL) {
hipLaunchKernelGGL(( __copyToInds2DBnx), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
} else {
hipLaunchKernelGGL(( __copyToInds2DBnn), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
}
}
}
hipStreamSynchronize(SYNC_STREAM);
hipError_t err = hipGetLastError();
return err;
}
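// Dispatch notes for copyToInds2D: a NULL index array selects the identity
// map for that dimension ("x" kernel variants index directly, "n" variants
// dereference I or J). The one-block-per-column "A" kernels are used when the
// grid can cover every column; otherwise the one-thread-per-element "B"
// kernels are used. Illustrative call, copying a 3x2 A into rows I and
// columns 0..1 of B (d_A, d_B, d_I are assumed device pointers, ldb is B's
// leading dimension):
//
//   int h_I[3] = {4, 7, 9};
//   hipMemcpy(d_I, h_I, sizeof(h_I), hipMemcpyHostToDevice);
//   copyToInds2D(d_A, 3, d_B, ldb, d_I, 3, NULL, 2);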
__global__ void __copyToInds3D(double *A, int lda, int rda, double *B, int ldb, int rdb, int *I, int nrows, int *J, int ncols, int *K, int nk) {
int ii = threadIdx.x + blockDim.x * blockIdx.x;
int jj = threadIdx.y + blockDim.y * blockIdx.y;
int kk = threadIdx.z + blockDim.z * blockIdx.z;
int i, j, k, mapi, mapj, mapk;
for (k = kk; k < nk; k += blockDim.z * gridDim.z) {
mapk = k;
if (K != NULL) mapk = K[k];
for (j = jj; j < ncols; j += blockDim.y * gridDim.y) {
mapj = j;
if (J != NULL) mapj = J[j];
if (I != NULL) {
for (i = ii; i < nrows; i += blockDim.x * gridDim.x) {
mapi = I[i];
B[mapi + ldb * (mapj + rdb * mapk)] = A[i + lda * (j + rda * k)];
}
} else {
for (i = ii; i < nrows; i += blockDim.x * gridDim.x) {
mapi = i;
B[mapi + ldb * (mapj + rdb * mapk)] = A[i + lda * (j + rda * k)];
}
}
}
}
}
int copyToInds3D(double *A, int lda, int rda, double *B, int ldb, int rdb, int *I, int nrows, int *J, int ncols, int *K, int nk) {
int ntx, nty, ntz, nbx, nby, nbz;
ntx = min(nrows, 1024);
nbx = min((nrows - 1) / ntx + 1, 1024);
nty = min(ncols, 1024/ntx);
nby = min((ncols - 1) / nty + 1, 1024);
ntz = min(nk, 1024/ntx/nty);
nbz = min((nk - 1) / ntz + 1, 1024);
dim3 blockdims(ntx, nty, ntz);
dim3 griddims(nbx, nby, nbz);
hipLaunchKernelGGL(( __copyToInds3D), dim3(griddims),dim3(blockdims), 0, 0, A, lda, rda, B, ldb, rdb, I, nrows, J, ncols, K, nk);
hipStreamSynchronize(SYNC_STREAM);
hipError_t err = hipGetLastError();
return err;
}
__global__ void __copyToInds4D(double *A, int lda, int rda, int tda, double *B, int ldb, int rdb, int tdb, int *I, int nrows, int *J, int ncols, int *K, int nk, int *L, int nl, int ntk, int nbk, int ntl, int nbl) {
int ii = threadIdx.x + blockDim.x * blockIdx.x;
int jj = threadIdx.y + blockDim.y * blockIdx.y;
int tk = threadIdx.z / ntl;
int tl = threadIdx.z - tk * ntl;
int bk = blockIdx.z / nbl;
int bl = blockIdx.z - bk * nbl;
int kk = tk + ntk * bk;
int ll = tl + ntl * bl;
int i, j, k, l, mapi, mapj, mapk, mapl;
for (l = ll; l < nl; l += ntl * nbl) {
mapl = l;
if (L != NULL) mapl = L[l];
for (k = kk; k < nk; k += ntk * nbk) {
mapk = k;
if (K != NULL) mapk = K[k];
for (j = jj; j < ncols; j += blockDim.y * gridDim.y) {
mapj = j;
if (J != NULL) mapj = J[j];
if (I != NULL) {
for (i = ii; i < nrows; i += blockDim.x * gridDim.x) {
mapi = I[i];
B[mapi + ldb * (mapj + rdb * (mapk + tdb * mapl))] = A[i + lda * (j + rda * (k + tda * l))];
}
} else {
for (i = ii; i < nrows; i += blockDim.x * gridDim.x) {
B[i + ldb * (mapj + rdb * (mapk + tdb * mapl))] = A[i + lda * (j + rda * (k + tda * l))];
}
}
}
}
}
}
int copyToInds4D(double *A, int lda, int rda, int tda, double *B, int ldb, int rdb, int tdb, int *I, int nrows, int *J, int ncols, int *K, int nk, int *L, int nl) {
int ntx, nty, ntk, ntl, nbx, nby, nbk, nbl;
ntx = min(nrows, 1024);
nbx = min((nrows - 1) / ntx + 1, 1024);
nty = min(ncols, 1024/ntx);
nby = min((ncols - 1) / nty + 1, 1024);
ntk = min(nk, 1024/ntx/nty);
nbk = min((nk - 1) / ntk + 1, 255);
ntl = min(nl, 1024/ntx/nty/ntk);
nbl = min((nl - 1) / ntl + 1, 255);
dim3 blockdims(ntx, nty, ntk * ntl);
dim3 griddims(nbx, nby, nbk * nbl);
hipLaunchKernelGGL(( __copyToInds4D), dim3(griddims),dim3(blockdims), 0, 0, A, lda, rda, tda, B, ldb, rdb, tdb, I, nrows, J, ncols, K, nk, L, nl, ntk, nbk, ntl, nbl);
hipStreamSynchronize(SYNC_STREAM);
hipError_t err = hipGetLastError();
return err;
}
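// A launch grid only has three axes, so the 4D kernels pack the third and
// fourth logical dimensions into the z axis and pass the per-axis extents
// (ntk, nbk, ntl, nbl) so the kernel can unpack them:
//
//   tk = threadIdx.z / ntl;  tl = threadIdx.z % ntl;   // thread coords (k, l)
//   bk = blockIdx.z  / nbl;  bl = blockIdx.z  % nbl;   // block  coords (k, l)
//
// which is why blockdims.z = ntk * ntl and griddims.z = nbk * nbl above.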
__global__ void __fillToInds(double A, double *B, int *I, long long len) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
int step = blockDim.x * gridDim.x * gridDim.y;
long long i;
for (i = tid; i < len; i += step) {
B[I[i]] = A;
}
}
int fillToInds(double A, double *B, int *I, long long len) {
int nthreads;
dim3 griddims;
setsizesLeanD(len, &griddims, &nthreads);
hipLaunchKernelGGL(( __fillToInds), dim3(griddims),dim3(nthreads), 0, 0, A, B, I, len);
hipStreamSynchronize(SYNC_STREAM);
hipError_t err = hipGetLastError();
return err;
}
// Implement B[I,J] = c
// indexed copy: version with one block per column
#define FILLTOINDS2DA(DFNAME,IEXPR,JEXPR,ETYPE) \
__global__ void __fillToInds2D##DFNAME(ETYPE A, ETYPE *B, int ldb, int *I, int nrows, int *J, int ncols) { \
int iblock = blockIdx.x + blockIdx.y * gridDim.x; \
if (iblock < ncols) { \
int icol = JEXPR; \
for (int i = threadIdx.x; i < nrows; i += blockDim.x) { \
B[IEXPR + icol * ldb] = A; \
} \
} \
}
FILLTOINDS2DA(nn,I[i],J[iblock],double)
FILLTOINDS2DA(xn,i,J[iblock],double)
FILLTOINDS2DA(nx,I[i],iblock,double)
FILLTOINDS2DA(xx,i,iblock,double)
// Implement B[I,J] = c
// indexed copy: version with one thread per element
#define FILLTOINDS2DB(DFNAME,IEXPR,JEXPR,ETYPE) \
__global__ void __fillToInds2DB##DFNAME(ETYPE A, ETYPE *B, int ldb, int *I, int nrows, int *J, int ncols) { \
int indx = threadIdx.x + blockDim.x * (blockIdx.x + blockIdx.y * gridDim.x); \
if (indx < nrows * ncols) { \
int irow = indx % nrows; \
int icol = indx / nrows; \
B[IEXPR + JEXPR * ldb] = A; \
} \
}
FILLTOINDS2DB(nn,I[irow],J[icol],double)
FILLTOINDS2DB(xn,irow,J[icol],double)
FILLTOINDS2DB(nx,I[irow],icol,double)
FILLTOINDS2DB(xx,irow,icol,double)
int fillToInds2D(double A, double *B, int ldb, int *I, int nrows, int *J, int ncols) {
int len = nrows * ncols;
int nthreads = max(32, min(1024, nrows));
int nblocks = min(ncols, (len-1)/nthreads + 1);
dim3 griddims;
griddims.x = 1;
griddims.y = 1;
griddims.z = 1;
if (nblocks < 65536) {
griddims.x = nblocks;
} else {
int vs = (int)sqrt((float)nblocks);
griddims.x = vs;
griddims.y = (nblocks-1)/vs + 1;
}
if (nblocks == ncols) {
if (I == NULL) {
if (J == NULL) {
hipLaunchKernelGGL(( __fillToInds2Dxx), dim3(griddims),dim3(nthreads), 0, 0, A, B, ldb, I, nrows, J, ncols);
} else {
hipLaunchKernelGGL(( __fillToInds2Dxn), dim3(griddims),dim3(nthreads), 0, 0, A, B, ldb, I, nrows, J, ncols);
}
} else {
if (J == NULL) {
hipLaunchKernelGGL(( __fillToInds2Dnx), dim3(griddims),dim3(nthreads), 0, 0, A, B, ldb, I, nrows, J, ncols);
} else {
hipLaunchKernelGGL(( __fillToInds2Dnn), dim3(griddims),dim3(nthreads), 0, 0, A, B, ldb, I, nrows, J, ncols);
}
}
} else {
if (I == NULL) {
if (J == NULL) {
hipLaunchKernelGGL(( __fillToInds2DBxx), dim3(griddims),dim3(nthreads), 0, 0, A, B, ldb, I, nrows, J, ncols);
} else {
hipLaunchKernelGGL(( __fillToInds2DBxn), dim3(griddims),dim3(nthreads), 0, 0, A, B, ldb, I, nrows, J, ncols);
}
} else {
if (J == NULL) {
hipLaunchKernelGGL(( __fillToInds2DBnx), dim3(griddims),dim3(nthreads), 0, 0, A, B, ldb, I, nrows, J, ncols);
} else {
hipLaunchKernelGGL(( __fillToInds2DBnn), dim3(griddims),dim3(nthreads), 0, 0, A, B, ldb, I, nrows, J, ncols);
}
}
}
hipStreamSynchronize(SYNC_STREAM);
hipError_t err = hipGetLastError();
return err;
}
__global__ void __fillToInds3D(double A, double *B, int ldb, int rdb, int *I, int nrows, int *J, int ncols, int *K, int nk) {
int ii = threadIdx.x + blockDim.x * blockIdx.x;
int jj = threadIdx.y + blockDim.y * blockIdx.y;
int kk = threadIdx.z + blockDim.z * blockIdx.z;
int i, j, k, mapi, mapj, mapk;
for (k = kk; k < nk; k += blockDim.z * gridDim.z) {
mapk = k;
if (K != NULL) mapk = K[k];
for (j = jj; j < ncols; j += blockDim.y * gridDim.y) {
mapj = j;
if (J != NULL) mapj = J[j];
if (I != NULL) {
for (i = ii; i < nrows; i += blockDim.x * gridDim.x) {
mapi = I[i];
B[mapi + ldb * (mapj + rdb * mapk)] = A;
}
} else {
for (i = ii; i < nrows; i += blockDim.x * gridDim.x) {
mapi = i;
B[mapi + ldb * (mapj + rdb * mapk)] = A;
}
}
}
}
}
int fillToInds3D(double A, double *B, int ldb, int rdb, int *I, int nrows, int *J, int ncols, int *K, int nk) {
int ntx, nty, ntz, nbx, nby, nbz;
ntx = min(nrows, 1024);
nbx = min((nrows - 1) / ntx + 1, 1024);
nty = min(ncols, 1024/ntx);
nby = min((ncols - 1) / nty + 1, 1024);
ntz = min(nk, 1024/ntx/nty);
nbz = min((nk - 1) / ntz + 1, 1024);
dim3 blockdims(ntx, nty, ntz);
dim3 griddims(nbx, nby, nbz);
hipLaunchKernelGGL(( __fillToInds3D), dim3(griddims),dim3(blockdims), 0, 0, A, B, ldb, rdb, I, nrows, J, ncols, K, nk);
hipStreamSynchronize(SYNC_STREAM);
hipError_t err = hipGetLastError();
return err;
}
__global__ void __fillToInds4D(double A, double *B, int ldb, int rdb, int tdb, int *I, int nrows, int *J, int ncols, int *K, int nk, int *L, int nl, int ntk, int nbk, int ntl, int nbl) {
int ii = threadIdx.x + blockDim.x * blockIdx.x;
int jj = threadIdx.y + blockDim.y * blockIdx.y;
int tk = threadIdx.z / ntl;
int tl = threadIdx.z - tk * ntl;
int bk = blockIdx.z / nbl;
int bl = blockIdx.z - bk * nbl;
int kk = tk + ntk * bk;
int ll = tl + ntl * bl;
int i, j, k, l, mapi, mapj, mapk, mapl;
for (l = ll; l < nl; l += ntl * nbl) {
mapl = l;
if (L != NULL) mapl = L[l];
for (k = kk; k < nk; k += ntk * nbk) {
mapk = k;
if (K != NULL) mapk = K[k];
for (j = jj; j < ncols; j += blockDim.y * gridDim.y) {
mapj = j;
if (J != NULL) mapj = J[j];
if (I != NULL) {
for (i = ii; i < nrows; i += blockDim.x * gridDim.x) {
mapi = I[i];
B[mapi + ldb * (mapj + rdb * (mapk + tdb * mapl))] = A;
}
} else {
for (i = ii; i < nrows; i += blockDim.x * gridDim.x) {
B[i + ldb * (mapj + rdb * (mapk + tdb * mapl))] = A;
}
}
}
}
}
}
int fillToInds4D(double A, double *B, int ldb, int rdb, int tdb, int *I, int nrows, int *J, int ncols, int *K, int nk, int *L, int nl) {
int ntx, nty, ntk, ntl, nbx, nby, nbk, nbl;
ntx = min(nrows, 1024);
nbx = min((nrows - 1) / ntx + 1, 1024);
nty = min(ncols, 1024/ntx);
nby = min((ncols - 1) / nty + 1, 1024);
ntk = min(nk, 1024/ntx/nty);
nbk = min((nk - 1) / ntk + 1, 255);
ntl = min(nl, 1024/ntx/nty/ntk);
nbl = min((nl - 1) / ntl + 1, 255);
dim3 blockdims(ntx, nty, ntk * ntl);
dim3 griddims(nbx, nby, nbk * nbl);
hipLaunchKernelGGL(( __fillToInds4D), dim3(griddims),dim3(blockdims), 0, 0, A, B, ldb, rdb, tdb, I, nrows, J, ncols, K, nk, L, nl, ntk, nbk, ntl, nbl);
hipStreamSynchronize(SYNC_STREAM);
hipError_t err = hipGetLastError();
return err;
}
// Implement B = A[I,J]
// indexed copy: version with one block per column
#define COPYFROMINDS2DA(FNAME,IEXPR,JEXPR) \
__global__ void __copyFromInds2D##FNAME(double *A, int lda, double *B, int ldb, int *I, int nrows, int *J, int ncols) { \
int iblock = blockIdx.x + blockIdx.y * gridDim.x; \
if (iblock < ncols) { \
int icol = JEXPR; \
for (int i = threadIdx.x; i < nrows; i += blockDim.x) { \
B[i + iblock * ldb] = A[IEXPR + icol * lda]; \
} \
} \
}
COPYFROMINDS2DA(nn,I[i],J[iblock])
COPYFROMINDS2DA(xn,i,J[iblock])
COPYFROMINDS2DA(nx,I[i],iblock)
COPYFROMINDS2DA(xx,i,iblock)
// Implement B = A[I,J]
// indexed copy: version with one thread per element
#define COPYFROMINDS2DB(FNAME,IEXPR,JEXPR) \
__global__ void __copyFromInds2DB##FNAME(double *A, int lda, double *B, int ldb, int *I, int nrows, int *J, int ncols) { \
int indx = threadIdx.x + blockDim.x * (blockIdx.x + blockIdx.y * gridDim.x); \
if (indx < nrows * ncols) { \
int irow = indx % nrows; \
int icol = indx / nrows; \
B[irow + icol * ldb] = A[IEXPR + JEXPR * lda]; \
} \
}
COPYFROMINDS2DB(nn,I[irow],J[icol])
COPYFROMINDS2DB(xn,irow,J[icol])
COPYFROMINDS2DB(nx,I[irow],icol)
COPYFROMINDS2DB(xx,irow,icol)
// Implement B = A[I,J]
int copyFromInds2D(double *A, int lda, double *B, int ldb, int *I, int nrows, int *J, int ncols) {
int len = nrows * ncols;
int nthreads = max(32, min(1024, nrows));
int nblocks = min(ncols, (len-1)/nthreads + 1);
dim3 griddims;
griddims.x = 1;
griddims.y = 1;
griddims.z = 1;
if (nblocks < 65536) {
griddims.x = nblocks;
} else {
int vs = (int)sqrt((float)nblocks);
griddims.x = vs;
griddims.y = (nblocks-1)/vs + 1;
}
if (nblocks == ncols) {
if (I == NULL) {
if (J == NULL) {
hipLaunchKernelGGL(( __copyFromInds2Dxx), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
} else {
hipLaunchKernelGGL(( __copyFromInds2Dxn), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
}
} else {
if (J == NULL) {
hipLaunchKernelGGL(( __copyFromInds2Dnx), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
} else {
hipLaunchKernelGGL(( __copyFromInds2Dnn), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
}
}
} else {
if (I == NULL) {
if (J == NULL) {
hipLaunchKernelGGL(( __copyFromInds2DBxx), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
} else {
hipLaunchKernelGGL(( __copyFromInds2DBxn), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
}
} else {
if (J == NULL) {
hipLaunchKernelGGL(( __copyFromInds2DBnx), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
} else {
hipLaunchKernelGGL(( __copyFromInds2DBnn), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
}
}
}
hipStreamSynchronize(SYNC_STREAM);
hipError_t err = hipGetLastError();
return err;
}
__global__ void __copyFromInds3D(double *A, int lda, int rda, double *B, int ldb, int rdb, int *I, int nrows, int *J, int ncols, int *K, int nk) {
int ii = threadIdx.x + blockDim.x * blockIdx.x;
int jj = threadIdx.y + blockDim.y * blockIdx.y;
int kk = threadIdx.z + blockDim.z * blockIdx.z;
int i, j, k, mapi, mapj, mapk;
for (k = kk; k < nk; k += blockDim.z * gridDim.z) {
mapk = k;
if (K != NULL) mapk = K[k];
for (j = jj; j < ncols; j += blockDim.y * gridDim.y) {
mapj = j;
if (J != NULL) mapj = J[j];
if (I != NULL) {
for (i = ii; i < nrows; i += blockDim.x * gridDim.x) {
mapi = I[i];
B[i + ldb * (j + rdb * k)] = A[mapi + lda * (mapj + rda * mapk)];
}
} else {
for (i = ii; i < nrows; i += blockDim.x * gridDim.x) {
mapi = i;
B[i + ldb * (j + rdb * k)] = A[mapi + lda * (mapj + rda * mapk)];
}
}
}
}
}
int copyFromInds3D(double *A, int lda, int rda, double *B, int ldb, int rdb, int *I, int nrows, int *J, int ncols, int *K, int nk) {
int ntx, nty, ntz, nbx, nby, nbz;
  ntx = min(nrows, 1024);
  nbx = min((nrows - 1) / ntx + 1, 1024);
  nty = min(ncols, 1024/ntx);
  nby = min((ncols - 1) / nty + 1, 1024);
  ntz = min(nk, 1024/(ntx*nty));
  nbz = min((nk - 1) / ntz + 1, 1024);
dim3 blockdims(ntx, nty, ntz);
dim3 griddims(nbx, nby, nbz);
hipLaunchKernelGGL(( __copyFromInds3D), dim3(griddims),dim3(blockdims), 0, 0, A, lda, rda, B, ldb, rdb, I, nrows, J, ncols, K, nk);
hipStreamSynchronize(SYNC_STREAM);
hipError_t err = hipGetLastError();
return err;
}
__global__ void __copyFromInds4D(double *A, int lda, int rda, int tda, double *B, int ldb, int rdb, int tdb, int *I, int nrows, int *J, int ncols, int *K, int nk, int *L, int nl, int ntk, int nbk, int ntl, int nbl) {
int ii = threadIdx.x + blockDim.x * blockIdx.x;
int jj = threadIdx.y + blockDim.y * blockIdx.y;
int tk = threadIdx.z / ntl;
int tl = threadIdx.z - tk * ntl;
int bk = blockIdx.z / nbl;
int bl = blockIdx.z - bk * nbl;
int kk = tk + ntk * bk;
int ll = tl + ntl * bl;
int i, j, k, l, mapi, mapj, mapk, mapl;
for (l = ll; l < nl; l += ntl * nbl) {
mapl = l;
if (L != NULL) mapl = L[l];
for (k = kk; k < nk; k += ntk * nbk) {
mapk = k;
if (K != NULL) mapk = K[k];
for (j = jj; j < ncols; j += blockDim.y * gridDim.y) {
mapj = j;
if (J != NULL) mapj = J[j];
if (I != NULL) {
for (i = ii; i < nrows; i += blockDim.x * gridDim.x) {
mapi = I[i];
B[i + ldb * (j + rdb * (k + tdb * l))] = A[mapi + lda * (mapj + rda * (mapk + tda * mapl))];
}
} else {
for (i = ii; i < nrows; i += blockDim.x * gridDim.x) {
B[i + ldb * (j + rdb * (k + tdb * l))] = A[i + lda * (mapj + rda * (mapk + tda * mapl))];
}
}
}
}
}
}
int copyFromInds4D(double *A, int lda, int rda, int tda, double *B, int ldb, int rdb, int tdb, int *I, int nrows, int *J, int ncols, int *K, int nk, int *L, int nl) {
int ntx, nty, ntk, ntl, nbx, nby, nbk, nbl;
ntx = min(nrows, 1024);
nbx = min((nrows - 1) / ntx + 1, 1024);
nty = min(ncols, 1024/ntx);
nby = min((ncols - 1) / nty + 1, 1024);
ntk = min(nk, 1024/ntx/nty);
nbk = min((nk - 1) / ntk + 1, 255);
ntl = min(nl, 1024/ntx/nty/ntk);
nbl = min((nl - 1) / ntl + 1, 255);
dim3 blockdims(ntx, nty, ntk * ntl);
dim3 griddims(nbx, nby, nbk * nbl);
hipLaunchKernelGGL(( __copyFromInds4D), dim3(griddims),dim3(blockdims), 0, 0, A, lda, rda, tda, B, ldb, rdb, tdb, I, nrows, J, ncols, K, nk, L, nl, ntk, nbk, ntl, nbl);
hipStreamSynchronize(SYNC_STREAM);
hipError_t err = hipGetLastError();
return err;
}
__global__ void __dsmult(int nrows, int nnz, double *A, double *Bdata, int *Bir, int *Bic, double *C) {
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
for (int i = threadIdx.x; i < nrows; i += blockDim.x) {
double sum = 0;
for (int j = jstart; j < jend ; j++) {
sum += A[i + nrows * Bir[j]] * Bdata[j];
if (j == jend-1 || Bic[j] != Bic[j+1]) {
atomicAdd(&C[i + nrows * Bic[j]], sum);
sum = 0;
}
}
}
}
__global__ void __dsmultx(int nrows, int nnz, double *A, double *Bdata, int *Bir, int *Bic, double *C) {
int bid = threadIdx.y + blockDim.y * blockIdx.x;
int nb = blockDim.y * gridDim.x;
int jstart = ((long long)bid) * nnz / nb;
int jend = ((long long)(bid + 1)) * nnz / nb;
double sum = 0;
for (int j = jstart; j < jend ; j++) {
sum += A[threadIdx.x + nrows * Bir[j]] * Bdata[j];
if (j == jend-1 || Bic[j] != Bic[j+1]) {
atomicAdd(&C[threadIdx.x + nrows * Bic[j]], sum);
sum = 0;
}
}
}
int dsmult(int nrows, int ncols, int nnz, double *A, double *Bdata, int *Bir, int *Bic, double *C) {
if (nrows < 128) {
int nt = max(1, min(ncols/2, 256/nrows));
dim3 threadDim(nrows, nt, 1);
int nblocks = min(MAXXGRID, max(1, ncols/nt));
hipLaunchKernelGGL(( __dsmultx), dim3(nblocks),dim3(threadDim), 0, 0, nrows, nnz, A, Bdata, Bir, Bic, C);
} else {
int nthreads = min(1024, nrows);
int nblocks = min(MAXXGRID, ncols);
hipLaunchKernelGGL(( __dsmult), dim3(nblocks),dim3(nthreads), 0, 0, nrows, nnz, A, Bdata, Bir, Bic, C);
}
hipStreamSynchronize(SYNC_STREAM);
hipError_t err = hipGetLastError();
return err;
}
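// dsmult computes C += A * B for a dense A and a sparse B given in coordinate
// form: Bdata/Bir/Bic hold the value, row and column of each of the nnz
// entries, ordered by column. Each block owns a contiguous slice of nonzeros
// and flushes its per-row partial sum with atomicAdd whenever the column
// index changes, so slices that split a column still combine correctly.
// dsmultT below computes C += A * B^T with the same layout. Illustrative
// call (device pointers assumed allocated and filled):
//
//   dsmult(nrows, ncols, nnz, d_A, d_Bdata, d_Bir, d_Bic, d_C);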
int dsmult_tune(int nrows, int ncols, int nnz, double *A, double *Bdata, int *Bir, int *Bic, double *C, int nblocks, int nthreads) {
hipLaunchKernelGGL(( __dsmult), dim3(nblocks),dim3(nthreads), 0, 0, nrows, nnz, A, Bdata, Bir, Bic, C);
hipStreamSynchronize(SYNC_STREAM);
hipError_t err = hipGetLastError();
return err;
}
int dsmultx_tune(int nrows, int ncols, int nnz, double *A, double *Bdata, int *Bir, int *Bic, double *C, int nblocks, int nthreadsx, int nthreadsy) {
dim3 threadDim(nthreadsx, nthreadsy, 1);
hipLaunchKernelGGL(( __dsmultx), dim3(nblocks),dim3(threadDim), 0, 0, nrows, nnz, A, Bdata, Bir, Bic, C);
hipStreamSynchronize(SYNC_STREAM);
hipError_t err = hipGetLastError();
return err;
}
__global__ void __dsmultT(int nrows, int nnz, double *A, double *Bdata, int *Bir, int *Bic, double *C) {
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
for (int i = threadIdx.x; i < nrows; i += blockDim.x) {
double aval = 0;
for (int j = jstart; j < jend ; j++) {
if (j == jstart || Bic[j-1] != Bic[j]) {
aval = A[i + nrows * Bic[j]];
}
atomicAdd(&C[i + nrows * Bir[j]], aval * Bdata[j]);
}
}
}
__global__ void __dsmultTx(int nrows, int nnz, double *A, double *Bdata, int *Bir, int *Bic, double *C) {
int bid = threadIdx.y + blockDim.y * blockIdx.x;
int nb = blockDim.y * gridDim.x;
int jstart = ((long long)bid) * nnz / nb;
int jend = ((long long)(bid + 1)) * nnz / nb;
double aval = 0;
for (int j = jstart; j < jend ; j++) {
if (j == jstart || Bic[j-1] != Bic[j]) {
aval = A[threadIdx.x + nrows * Bic[j]];
}
atomicAdd(&C[threadIdx.x + nrows * Bir[j]], aval * Bdata[j]);
}
}
int dsmultT(int nrows, int ncols, int nnz, double *A, double *Bdata, int *Bir, int *Bic, double *C) {
if (nrows < 128) {
int nt = max(1, min(ncols/2, 256/nrows));
dim3 threadDim(nrows, nt, 1);
int nblocks = min(MAXXGRID, max(1, ncols/nt));
hipLaunchKernelGGL(( __dsmultTx), dim3(nblocks),dim3(threadDim), 0, 0, nrows, nnz, A, Bdata, Bir, Bic, C);
} else {
int nthreads = min(1024, nrows);
int nblocks = min(MAXXGRID, ncols);
hipLaunchKernelGGL(( __dsmultT), dim3(nblocks),dim3(nthreads), 0, 0, nrows, nnz, A, Bdata, Bir, Bic, C);
}
hipStreamSynchronize(SYNC_STREAM);
hipError_t err = hipGetLastError();
return err;
}
__global__ void __spsum1(int nrows, int ncols, int nnz, int *Air, int *Aic, double *P, double *B) {
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
for (int i = jstart + threadIdx.x; i < jend; i += blockDim.x) {
atomicAdd(&B[Aic[i]], P[i]);
}
}
__global__ void __spsum2(int nrows, int ncols, int nnz, int *Air, int *Aic, double *P, double *B) {
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
for (int i = jstart + threadIdx.x; i < jend; i += blockDim.x) {
atomicAdd(&B[Air[i]], P[i]);
}
}
int spsum(int nrows, int ncols, int nnz, int *Air, int *Aic, double *P, double *B, int n) {
int nthreads = max(32, min(128, nnz));
int nblks = min(65536, max(1, (nnz-1) / 128));
if (n == 1) {
hipLaunchKernelGGL(( __spsum1), dim3(nblks),dim3(nthreads), 0, 0, nrows, ncols, nnz, Air, Aic, P, B);
} else {
hipLaunchKernelGGL(( __spsum2), dim3(nblks),dim3(nthreads), 0, 0, nrows, ncols, nnz, Air, Aic, P, B);
}
hipStreamSynchronize(SYNC_STREAM);
hipError_t err = hipGetLastError();
return err;
}
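// spsum reduces the values P of a COO sparse matrix (rows Air, columns Aic)
// into B, which the caller must zero first: n == 1 accumulates per column,
// B[Aic[i]] += P[i]; any other n accumulates per row, B[Air[i]] += P[i].
//
//   hipMemset(d_B, 0, ncols * sizeof(double));
//   spsum(nrows, ncols, nnz, d_Air, d_Aic, d_P, d_B, 1);  // column sums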
__global__ void __dds(int nrows, int nnz, double *A, double *B, int *Cir, int *Cic, double *P);
__global__ void __dds0(int nrows, int ncols, double *A, double *B, int *Cir, int *Cic, double *P);
#define DDS_BLKY 32
#if __CUDA_ARCH__ > 200
__global__ void __dds(int nrows, int nnz, double *A, double *B, int *Cir, int *Cic, double *P) {
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
int tid = threadIdx.x + blockDim.x * threadIdx.y;
for (int j = jstart; j < jend ; j++) {
double sum = 0;
int aoff = nrows * Cir[j];
int boff = nrows * Cic[j];
for (int i = tid; i < nrows; i += blockDim.x * blockDim.y) {
sum += A[i + aoff] * B[i + boff];
}
for (int i = 1; i < blockDim.x; i *= 2) {
double tmp = __shfl_down(sum, i);
if (threadIdx.x + i < blockDim.x) sum = sum + tmp;
}
if (threadIdx.x == 0) {
atomicAdd(&P[j], sum);
}
}
}
__global__ void __dds0(int nrows, int ncols, double *A, double *B, int *Cir, int *Cjc, double *P) {
__shared__ double merge[32];
int jstart = ((long long)blockIdx.x) * ncols / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * ncols / gridDim.x;
int tid = threadIdx.x + blockDim.x * threadIdx.y;
int aoff, boff;
double user, prod, sum, bsum;
for (int j0 = jstart; j0 < jend ; j0++) {
boff = nrows * j0;
user = B[tid + boff];
for (int j = Cjc[j0]; j < Cjc[j0+1]; j++) {
aoff = nrows * Cir[j];
prod = A[tid + aoff] * user;
sum = prod + __shfl_down(prod, 1);
sum = sum + __shfl_down(sum, 2);
sum = sum + __shfl_down(sum, 4);
sum = sum + __shfl_down(sum, 8);
sum = sum + __shfl_down(sum, 16);
bsum = __shfl(sum, 0);
__syncthreads();
if (threadIdx.x == threadIdx.y) {
merge[threadIdx.x] = bsum;
}
__syncthreads();
if (threadIdx.y == 0) {
sum = merge[threadIdx.x];
sum = sum + __shfl_down(sum, 1);
sum = sum + __shfl_down(sum, 2);
sum = sum + __shfl_down(sum, 4);
sum = sum + __shfl_down(sum, 8);
sum = sum + __shfl_down(sum, 16);
if (threadIdx.x == 0) {
P[j] = sum;
}
}
}
}
}
#else
__global__ void __dds(int nrows, int nnz, double *A, double *B, int *Cir, int *Cic, double *P) {
__shared__ double parts[32*DDS_BLKY];
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
int tid = threadIdx.x + blockDim.x * threadIdx.y;
for (int j = jstart; j < jend ; j++) {
double sum = 0;
int aoff = nrows * Cir[j];
int boff = nrows * Cic[j];
for (int i = tid; i < nrows; i += blockDim.x * blockDim.y) {
sum += A[i + aoff] * B[i + boff];
}
parts[tid] = sum;
for (int i = 1; i < blockDim.x * blockDim.y; i *= 2) {
__syncthreads();
if (i + tid < blockDim.x * blockDim.y) {
parts[tid] = parts[tid] + parts[i + tid];
}
}
__syncthreads();
if (tid == 0) {
P[j] = parts[0];
}
__syncthreads();
}
}
__global__ void __dds0(int nrows, int ncols, double *A, double *B, int *Cir, int *Cjc, double *P) {}
#endif
int dds(int nrows, int nnz, double *A, double *B, int *Cir, int *Cic, double *P) {
dim3 blockDims(min(32,nrows), min(DDS_BLKY, 1+(nrows-1)/64), 1);
// int nblocks = min(65536, max(1,nnz/8));
int nblocks = min(16384, max(1,nnz/128));
hipLaunchKernelGGL(( __dds), dim3(nblocks),dim3(blockDims), 0, 0, nrows, nnz, A, B, Cir, Cic, P);
hipStreamSynchronize(SYNC_STREAM);
hipError_t err = hipGetLastError();
return err;
}
int dds0(int nrows, int ncols, double *A, double *B, int *Cir, int *Cic, double *P) {
dim3 blockDims(32, 32, 1);
// int nblocks = min(65536, max(1,nnz/8));
int nblocks = min(16384, max(1,ncols/64));
hipLaunchKernelGGL(( __dds0), dim3(nblocks),dim3(blockDims), 0, 0, nrows, ncols, A, B, Cir, Cic, P);
hipStreamSynchronize(SYNC_STREAM);
hipError_t err = hipGetLastError();
return err;
}
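// dds and dds0 are sampled dense-dense products (SDDMM): for each stored
// position (Cir[j], Cic[j]) they compute the inner product of column Cir[j]
// of A with column Cic[j] of B over nrows and write it to P[j]. dds takes
// COO column indices and accumulates with atomicAdd; dds0 takes column
// pointers (Cjc, one segment per column of C) and reduces a 32x32 block with
// warp shuffles plus a shared-memory merge before writing P[j] directly.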
#define BLOCKDIM 32
__global__ void __transpose(double *in, int instride, double *out, int outstride, int nrows, int ncols) {
int nx = BLOCKDIM * gridDim.x;
int ny = BLOCKDIM * gridDim.y;
int ix = BLOCKDIM * blockIdx.x;
int iy = BLOCKDIM * blockIdx.y;
__shared__ double tile[BLOCKDIM][BLOCKDIM+1];
for (int yb = iy; yb < ncols; yb += ny) {
for (int xb = ix; xb < nrows; xb += nx) {
if (xb + threadIdx.x < nrows) {
int ylim = min(ncols, yb + BLOCKDIM);
for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) {
tile[threadIdx.x][y-yb] = in[threadIdx.x+xb + y*instride];
}
}
__syncthreads();
if (yb + threadIdx.x < ncols) {
int xlim = min(nrows, xb + BLOCKDIM);
for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) {
out[threadIdx.x + yb + x*outstride] = tile[x-xb][threadIdx.x];
}
}
__syncthreads();
}
}
}
int transpose(double *in, int instride, double *out, int outstride, int nrows, int ncols) {
int gridx = min(32, 1+(nrows-1)/256);
int gridy = min(32, 1+(ncols-1)/256);
const dim3 griddims(gridx, gridy, 1);
const dim3 blockdims(BLOCKDIM,16,1);
hipError_t err;
int dev = -1;
hipGetDevice(&dev);
hipLaunchKernelGGL(( __transpose), dim3(griddims),dim3(blockdims), 0, 0, in, instride, out, outstride, nrows, ncols);
hipStreamSynchronize(SYNC_STREAM);
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "cuda error device %d in transpose of %dx%d matrix", dev, nrows, ncols);
return err;
}
return 0;
}
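// The transpose kernel stages BLOCKDIM x BLOCKDIM tiles through shared memory
// so that both the global load and the global store are coalesced; the +1
// padding column in `tile` staggers the banks and avoids shared-memory bank
// conflicts on the transposed access. Illustrative call for a column-major
// m x n input with tight strides:
//
//   transpose(d_in, m, d_out, n, m, n);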
__global__ void __embedmat2d(double *a, long long *b, int nrows, int ncols, int sortdown) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
const int signbit = 0x80000000;
const int mag = 0x7fffffff;
int icol;
for (int i = tid; i < nrows*ncols; i += blockDim.x*gridDim.x*gridDim.y) {
double v = a[i];
int vi = *((int *)&v);
if (vi & signbit) {
vi = -(vi & mag);
}
icol = (i/nrows+1);
if (sortdown) icol = ncols - icol + 1;
b[i] = (long long)vi + (((long long)icol)<<32);
}
}
__global__ void __embedmat(double *a, int *b, long long *c, int n) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
const int signbit = 0x80000000;
const int mag = 0x7fffffff;
for (int i = tid; i < n; i += blockDim.x*gridDim.x*gridDim.y) {
double v = a[i];
int vi = *((int *)&v);
if (vi & signbit) {
vi = -(vi & mag);
}
c[i] = (long long)vi + (((long long)b[i])<<32);
}
}
int embedmat2d(double *a, long long *b, int nrows, int ncols, int sortdown) {
int nthreads;
dim3 griddims;
setsizesLeanD(nrows*ncols, &griddims, &nthreads);
hipLaunchKernelGGL(( __embedmat2d), dim3(griddims),dim3(nthreads), 0, 0, a, b, nrows, ncols, sortdown);
hipStreamSynchronize(SYNC_STREAM);
hipError_t err = hipGetLastError();
return err;
}
int embedmat(double *a, int *b, long long *c, int n) {
int nthreads;
dim3 griddims;
setsizesLeanD(n, &griddims, &nthreads);
hipLaunchKernelGGL(( __embedmat), dim3(griddims),dim3(nthreads), 0, 0, a, b, c, n);
hipStreamSynchronize(SYNC_STREAM);
hipError_t err = hipGetLastError();
return err;
}
__global__ void __extractmat2d(double *a, long long *b, int nrows, int ncols) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
const int signbit = 0x80000000;
const int mag = 0x7fffffff;
for (int i = tid; i < nrows*ncols; i += blockDim.x*gridDim.x*gridDim.y) {
    int vi = *((int *)&b[i]);
    if (vi & signbit) {
      vi = -(vi & mag);
    }
    long long vl = (unsigned int)vi;  // low word of the bit pattern; high word zeroed (defined behavior)
    a[i] = *((double *)&vl);
}
}
__global__ void __extractmat(double *a, int *b, long long *c, int n) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
const int signbit = 0x80000000;
const int mag = 0x7fffffff;
for (int i = tid; i < n; i += blockDim.x*gridDim.x*gridDim.y) {
    int vi = *((int *)&c[i]);
    if (vi & signbit) {
      vi = -(vi & mag);
    }
    long long vl = (unsigned int)vi;  // low word only; see the packing note after extractmat
    a[i] = *((double *)&vl);
    b[i] = *(((int *)&c[i])+1);
}
}
int extractmat2d(double *a, long long *b, int nrows, int ncols) {
int nthreads;
dim3 griddims;
setsizesLeanD(nrows*ncols, &griddims, &nthreads);
hipLaunchKernelGGL(( __extractmat2d), dim3(griddims),dim3(nthreads), 0, 0, a, b, nrows, ncols);
hipStreamSynchronize(SYNC_STREAM);
hipError_t err = hipGetLastError();
return err;
}
int extractmat(double *a, int *b, long long *c, int n) {
int nthreads;
dim3 griddims;
setsizesLeanD(n, &griddims, &nthreads);
hipLaunchKernelGGL(( __extractmat), dim3(griddims),dim3(nthreads), 0, 0, a, b, c, n);
hipStreamSynchronize(SYNC_STREAM);
hipError_t err = hipGetLastError();
return err;
}
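// Packing note: embedmat/embedmat2d build 64-bit sort keys whose low word is
// a comparison-friendly image of the value (negative bit patterns are flipped
// so signed integer order matches numeric order; the flip is self-inverse)
// and whose high word is the 1-based column (embedmat2d, optionally reversed
// for descending sorts) or a caller-supplied int (embedmat). A single 64-bit
// sort then orders by column first and value second, and extractmat and
// extractmat2d invert the transform. Caveat: only the low 32 bits of each
// double's bit pattern survive the round trip, so recovered values are
// truncated; the transform is only exact for 32-bit keys (e.g. floats).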
#include <thrust/sort.h>
#include <thrust/device_ptr.h>
#include <thrust/reverse.h>
int fsort2d(double *pkeys, unsigned int *pvals, int nrows, int ncols, int asc) {
for (int i = 0; i < ncols; i++) {
thrust::device_ptr<double> keys(pkeys+i*nrows);
thrust::device_ptr<unsigned int> vals(pvals+i*nrows);
if (asc > 0) {
thrust::sort_by_key(keys, keys + nrows, vals);
} else {
thrust::sort_by_key(keys, keys + nrows, vals, thrust::greater<double>());
}
}
hipStreamSynchronize(SYNC_STREAM);
hipError_t err = hipGetLastError();
return err;
}
int fsort(double *pkeys, int N, int asc) {
thrust::device_ptr<double> keys(pkeys);
if (asc > 0) {
thrust::sort(keys, keys + N);
} else {
    thrust::sort(keys, keys + N, thrust::greater<double>());
}
hipStreamSynchronize(SYNC_STREAM);
hipError_t err = hipGetLastError();
return err;
}
int fsorts(double *pkeys, unsigned int *pvals, int *jc, int m, int asc) {
for (int i = 0; i < m; i++) {
thrust::device_ptr<double> keys(pkeys + jc[i]);
thrust::device_ptr<unsigned int> vals(pvals + jc[i]);
int b = jc[i+1] - jc[i];
if (asc > 0) {
thrust::sort_by_key(keys, keys + b, vals);
} else {
thrust::sort_by_key(keys, keys + b, vals, thrust::greater<double>());
}
}
hipStreamSynchronize(SYNC_STREAM);
hipError_t err = hipGetLastError();
return err;
}
#if TORCH_HIP_VERSION >= 7000
#if TORCH_HIP_VERSION >= 9000
#include <thrust/system/hip/detail/hipcub/hipcub.hpp>
long long disortcubsize(double *inkeys, double *outkeys, unsigned int *invals, unsigned int *outvals, int nelems, int asc) {
size_t size = 0;
void *temp = NULL;
thrust::cuda_cub::cub::DoubleBuffer<double> d_keys(inkeys, outkeys);
thrust::cuda_cub::cub::DoubleBuffer<unsigned int> d_vals(invals, outvals);
if (asc > 0) {
thrust::cuda_cub::hipcub::DeviceRadixSort::SortPairs(temp, size, d_keys, d_vals, nelems);
} else {
thrust::cuda_cub::hipcub::DeviceRadixSort::SortPairsDescending(temp, size, d_keys, d_vals, nelems);
}
hipStreamSynchronize(SYNC_STREAM);
return size;
}
int disortcub(double *inkeys, double *outkeys, unsigned int *invals, unsigned int *outvals, int *temp, long long size, int nelems, int asc) {
thrust::cuda_cub::cub::DoubleBuffer<double> d_keys(inkeys, outkeys);
thrust::cuda_cub::cub::DoubleBuffer<unsigned int> d_vals(invals, outvals);
if (asc > 0) {
thrust::cuda_cub::hipcub::DeviceRadixSort::SortPairs((void *)temp, (size_t &)size, d_keys, d_vals, nelems);
} else {
thrust::cuda_cub::hipcub::DeviceRadixSort::SortPairsDescending((void *)temp, (size_t &)size, d_keys, d_vals, nelems);
}
hipStreamSynchronize(SYNC_STREAM);
hipError_t err = hipGetLastError();
return err;
}
int fsort2dx(double *pkeys, unsigned int *pvals, double *tkeys, unsigned int *tvals,
int nrows, int ncols, int asc) {
int i;
hipError_t err;
long long ntemp;
int * temp;
ntemp = disortcubsize(pkeys, tkeys, pvals, tvals, nrows, asc);
hipMalloc(&temp, ntemp * sizeof(int));
hipStreamSynchronize(SYNC_STREAM);
for (i = 0; i < ncols; i++) {
thrust::cuda_cub::cub::DoubleBuffer<double> d_keys(pkeys + (nrows * i), tkeys + (nrows * i));
thrust::cuda_cub::cub::DoubleBuffer<unsigned int> d_vals(pvals + (nrows * i), tvals + (nrows * i));
if (asc > 0) {
thrust::cuda_cub::hipcub::DeviceRadixSort::SortPairs((void *)temp, (size_t &)ntemp, d_keys, d_vals, nrows);
} else {
thrust::cuda_cub::hipcub::DeviceRadixSort::SortPairsDescending((void *)temp, (size_t &)ntemp, d_keys, d_vals, nrows);
}
}
hipStreamSynchronize(SYNC_STREAM);
hipFree(temp);
err = hipGetLastError();
return err;
}
#else
long long disortcubsize(double *inkeys, double *outkeys, unsigned int *invals, unsigned int *outvals, int nelems, int asc) {
size_t size = 0;
void *temp = NULL;
thrust::system::cuda::detail::cub_::DoubleBuffer<double> d_keys(inkeys, outkeys);
thrust::system::cuda::detail::cub_::DoubleBuffer<unsigned int> d_vals(invals, outvals);
if (asc > 0) {
thrust::system::cuda::detail::cub_::DeviceRadixSort::SortPairs(temp, size, d_keys, d_vals, nelems);
} else {
thrust::system::cuda::detail::cub_::DeviceRadixSort::SortPairsDescending(temp, size, d_keys, d_vals, nelems);
}
hipStreamSynchronize(SYNC_STREAM);
return size;
}
int disortcub(double *inkeys, double *outkeys, unsigned int *invals, unsigned int *outvals, int *temp, long long size, int nelems, int asc) {
thrust::system::cuda::detail::cub_::DoubleBuffer<double> d_keys(inkeys, outkeys);
thrust::system::cuda::detail::cub_::DoubleBuffer<unsigned int> d_vals(invals, outvals);
if (asc > 0) {
thrust::system::cuda::detail::cub_::DeviceRadixSort::SortPairs((void *)temp, (size_t &)size, d_keys, d_vals, nelems);
} else {
thrust::system::cuda::detail::cub_::DeviceRadixSort::SortPairsDescending((void *)temp, (size_t &)size, d_keys, d_vals, nelems);
}
hipStreamSynchronize(SYNC_STREAM);
hipError_t err = hipGetLastError();
return err;
}
int fsort2dx(double *pkeys, unsigned int *pvals, double *tkeys, unsigned int *tvals,
int nrows, int ncols, int asc) {
int i;
hipError_t err;
long long ntemp;
int * temp;
ntemp = disortcubsize(pkeys, tkeys, pvals, tvals, nrows, asc);
hipMalloc(&temp, ntemp * sizeof(int));
hipStreamSynchronize(SYNC_STREAM);
for (i = 0; i < ncols; i++) {
thrust::system::cuda::detail::cub_::DoubleBuffer<double> d_keys(pkeys + (nrows * i), tkeys + (nrows * i));
thrust::system::cuda::detail::cub_::DoubleBuffer<unsigned int> d_vals(pvals + (nrows * i), tvals + (nrows * i));
if (asc > 0) {
thrust::system::cuda::detail::cub_::DeviceRadixSort::SortPairs((void *)temp, (size_t &)ntemp, d_keys, d_vals, nrows);
} else {
thrust::system::cuda::detail::cub_::DeviceRadixSort::SortPairsDescending((void *)temp, (size_t &)ntemp, d_keys, d_vals, nrows);
}
}
hipStreamSynchronize(SYNC_STREAM);
hipFree(temp);
err = hipGetLastError();
return err;
}
#endif
#endif
__global__ void __stratify(double *strata, int n, double *a, double *b, unsigned int *bi, int stride) {
__shared__ double ss[32];
__shared__ unsigned int ibin[32];
__shared__ unsigned int ebin[32];
__shared__ unsigned int todo[32];
__shared__ double bins[64][33];
__shared__ unsigned int topush;
int tid = threadIdx.x;
ss[tid] = strata[tid];
ibin[tid] = 0;
for (int i = 0; i < n; i += blockDim.x * gridDim.x) {
int ii = i + tid + blockDim.x * blockIdx.x;
if (tid == 0) topush = 0;
if (ii < n) {
double v = a[ii];
int j = 1;
j = (v > ss[j-1]) ? 2*j+1 : 2*j;
j = (v > ss[j-1]) ? 2*j+1 : 2*j;
j = (v > ss[j-1]) ? 2*j+1 : 2*j;
j = (v > ss[j-1]) ? 2*j+1 : 2*j;
j = (v > ss[j-1]) ? 2*j+1 : 2*j;
j = j - 32;
int k = atomicInc(&ibin[j], 256);
bins[k][j] = v;
if (k == 31) {
k = atomicInc(&topush, 1024);
todo[k] = j;
}
}
if (ibin[tid] >= 32) {
ebin[tid] = atomicAdd(&bi[tid], 32);
ibin[tid] = ibin[tid] - 32;
}
for (int k = 0; k < topush; k++) {
int j = todo[k];
b[j*stride + ebin[j] + tid] = bins[ibin[j] + tid][j];
}
}
ebin[tid] = atomicAdd(&bi[tid], ibin[tid]);
for (int j = 0; j < 32; j++) {
if (tid < ibin[j]) {
b[j*stride + ebin[j] + tid] = bins[tid][j];
}
}
}
int stratify(double *strata, int n, double *a, double *b, unsigned int *bi, int stride) {
hipLaunchKernelGGL(( __stratify), dim3(40),dim3(32), 0, 0, strata, n, a, b, bi, stride);
hipStreamSynchronize(SYNC_STREAM);
hipError_t err = hipGetLastError();
return err;
}
#define SNDVALS 256
#define SNDGRPS 4
#define SNTHREADS 1024
#define SBIGBLK (4*1024)
__global__ void __stratifycounts(double *strata, int n, double *a, unsigned int *bi) {
__shared__ unsigned int ic[SNDVALS][SNDGRPS];
__shared__ double ss[SNDVALS];
int istart = (int)(((long long)blockIdx.x) * n / gridDim.x);
int iend = (int)(((long long)(blockIdx.x+1)) * n / gridDim.x);
int bibase = SNDVALS * (blockIdx.x + istart / SBIGBLK);
int tid = threadIdx.x + threadIdx.y * blockDim.x;
if (threadIdx.y == 0) {
ss[threadIdx.x] = strata[threadIdx.x];
}
for (int i = istart; i < iend; i += SBIGBLK) {
__syncthreads();
if (threadIdx.y < SNDGRPS) {
ic[threadIdx.x][threadIdx.y] = 0;
}
__syncthreads();
for (int k = i + tid; k < min(iend, i + tid + SBIGBLK); k += SNTHREADS) {
double v = a[k];
int j = 0;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = j - SNDVALS + 1;
atomicInc(&ic[j][threadIdx.y], 65536*32767);
}
__syncthreads();
if (threadIdx.y == 0) {
bi[bibase + threadIdx.x] = ic[threadIdx.x][0] + ic[threadIdx.x][1] + ic[threadIdx.x][2] + ic[threadIdx.x][3];
}
bibase += SNDVALS;
}
}
int stratifycounts(double *strata, int n, double *a, unsigned int *bi) {
const dim3 blockdims(SNDVALS, SNTHREADS/SNDVALS, 1);
const dim3 griddims(8,1,1);
hipLaunchKernelGGL(( __stratifycounts), dim3(griddims),dim3(blockdims), 0, 0, strata, n, a, bi);
hipStreamSynchronize(SYNC_STREAM);
hipError_t err = hipGetLastError();
return err;
}
#define RNDVALS 256
#define RNTHREADS 256
#define RNDBITS 8
#define RBIGBLK (4*1024)
__global__ void __radixcounts(double *a, int n, int digit, unsigned int *bi) {
__shared__ unsigned int ic[RNDVALS];
int istart = (int)(((long long)blockIdx.x) * n / gridDim.x);
int iend = (int)(((long long)(blockIdx.x+1)) * n / gridDim.x);
int tid = threadIdx.x;
int bibase = RNDVALS * (blockIdx.x + istart / RBIGBLK);
for (int i = istart; i < iend; i += RBIGBLK) {
__syncthreads();
ic[threadIdx.x] = 0;
__syncthreads();
for (int j = i + tid; j < min(iend, i+tid+RBIGBLK); j += RNTHREADS) {
double v = a[j];
unsigned char *cv = (unsigned char *)&v;
atomicInc(&ic[cv[digit]], 65536*32767);
}
__syncthreads();
bi[bibase + threadIdx.x] = ic[threadIdx.x];
bibase += RNDVALS;
}
}
int radixcounts(double *a, int n, int digit, unsigned int *bi) {
const dim3 blockdims(RNTHREADS,1,1);
const dim3 griddims(32,1,1);
hipLaunchKernelGGL(( __radixcounts), dim3(griddims),dim3(blockdims), 0, 0, a, n, digit, bi);
hipStreamSynchronize(SYNC_STREAM);
hipError_t err = hipGetLastError();
return err;
}
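// radixcounts histograms one byte ("digit") of each double: cv indexes the
// value's bytes directly, so digit 0 is the least-significant byte on a
// little-endian device. Each block writes one 256-bin count vector per
// RBIGBLK-sized chunk of its range, consecutively in bi; a caller can
// prefix-scan these counts into per-digit radix-sort offsets.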
#if __CUDA_ARCH__ > 200
#define GENDISTS(DFNAME,DFUNC) \
__global__ void DFNAME(double *A, int lda, double *B, int ldb, double *C, \
int ldc, int d, int nrows, int ncols, double p) { \
int xblk = blockDim.x * (threadIdx.y + blockIdx.y * blockDim.y); \
int yblk = blockDim.x * (threadIdx.z + blockIdx.z * blockDim.z); \
double va, vb, vc; \
double R00, R01, R02, R03, R04, R05, R06, R07, R08, R09, R10, R11, R12, R13, R14, R15, \
R16, R17, R18, R19, R20, R21, R22, R23, R24, R25, R26, R27, R28, R29, R30, R31; \
int xi = threadIdx.x + xblk; \
int yi = threadIdx.x; \
if (xi < nrows) { \
if (yi+yblk < ncols) {R00 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R01 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R02 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R03 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R04 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R05 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R06 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R07 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R08 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R09 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R10 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R11 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R12 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R13 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R14 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R15 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R16 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R17 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R18 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R19 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R20 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R21 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R22 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R23 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R24 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R25 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R26 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R27 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R28 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R29 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R30 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R31 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
} \
yi = threadIdx.x + yblk; \
int nbr = (threadIdx.x + 1) % blockDim.x; \
for (int i = 0; i < d; i++) { \
va = (xi < nrows) ? A[xi + i * lda] : 0; \
vb = (yi < ncols) ? B[yi + i * ldb] : 0; \
vc=R00; DFUNC; R00=vc; vb=__shfl(vb, nbr); vc=R01; DFUNC; R01=vc; vb=__shfl(vb, nbr); \
vc=R02; DFUNC; R02=vc; vb=__shfl(vb, nbr); vc=R03; DFUNC; R03=vc; vb=__shfl(vb, nbr); \
vc=R04; DFUNC; R04=vc; vb=__shfl(vb, nbr); vc=R05; DFUNC; R05=vc; vb=__shfl(vb, nbr); \
vc=R06; DFUNC; R06=vc; vb=__shfl(vb, nbr); vc=R07; DFUNC; R07=vc; vb=__shfl(vb, nbr); \
vc=R08; DFUNC; R08=vc; vb=__shfl(vb, nbr); vc=R09; DFUNC; R09=vc; vb=__shfl(vb, nbr); \
vc=R10; DFUNC; R10=vc; vb=__shfl(vb, nbr); vc=R11; DFUNC; R11=vc; vb=__shfl(vb, nbr); \
vc=R12; DFUNC; R12=vc; vb=__shfl(vb, nbr); vc=R13; DFUNC; R13=vc; vb=__shfl(vb, nbr); \
vc=R14; DFUNC; R14=vc; vb=__shfl(vb, nbr); vc=R15; DFUNC; R15=vc; vb=__shfl(vb, nbr); \
vc=R16; DFUNC; R16=vc; vb=__shfl(vb, nbr); vc=R17; DFUNC; R17=vc; vb=__shfl(vb, nbr); \
vc=R18; DFUNC; R18=vc; vb=__shfl(vb, nbr); vc=R19; DFUNC; R19=vc; vb=__shfl(vb, nbr); \
vc=R20; DFUNC; R20=vc; vb=__shfl(vb, nbr); vc=R21; DFUNC; R21=vc; vb=__shfl(vb, nbr); \
vc=R22; DFUNC; R22=vc; vb=__shfl(vb, nbr); vc=R23; DFUNC; R23=vc; vb=__shfl(vb, nbr); \
vc=R24; DFUNC; R24=vc; vb=__shfl(vb, nbr); vc=R25; DFUNC; R25=vc; vb=__shfl(vb, nbr); \
vc=R26; DFUNC; R26=vc; vb=__shfl(vb, nbr); vc=R27; DFUNC; R27=vc; vb=__shfl(vb, nbr); \
vc=R28; DFUNC; R28=vc; vb=__shfl(vb, nbr); vc=R29; DFUNC; R29=vc; vb=__shfl(vb, nbr); \
vc=R30; DFUNC; R30=vc; vb=__shfl(vb, nbr); vc=R31; DFUNC; R31=vc; vb=__shfl(vb, nbr); \
} \
yi = threadIdx.x; \
if (xi < nrows) { \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R00;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R01;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R02;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R03;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R04;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R05;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R06;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R07;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R08;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R09;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R10;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R11;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R12;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R13;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R14;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R15;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R16;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R17;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R18;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R19;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R20;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R21;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R22;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R23;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R24;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R25;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R26;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R27;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R28;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R29;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R30;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R31;} yi = (yi+1) % blockDim.x; \
} \
}
GENDISTS(__l1dist,vc+=abs(va-vb))
GENDISTS(__l2dist,vc+=(va-vb)*(va-vb))
GENDISTS(__minkowskidist,vc+=pow(abs(va-vb),p))
GENDISTS(__linfdist,vc=max(vc,abs(va-vb)))
GENDISTS(__msum,vc=max(vc,va+vb))
#else
__global__ void __l1dist(double *A, int lda, double *B, int ldb, double *C, int ldc, int d, int nrows, int ncols, double p) {
printf("Warning, Lidist not supported on arch <= 200\n");
}
__global__ void __l2dist(double *A, int lda, double *B, int ldb, double *C, int ldc, int d, int nrows, int ncols, double p) {
printf("Warning, L2dist not supported on arch <= 200\n");
}
__global__ void __minkowskidist(double *A, int lda, double *B, int ldb, double *C, int ldc, int d, int nrows, int ncols, double p) {
printf("Warning, Minkowski distance not supported on arch <= 200\n");
}
__global__ void __linfdist(double *A, int lda, double *B, int ldb, double *C, int ldc, int d, int nrows, int ncols, double p) {
printf("Warning, Max-abs distance not supported on arch <= 200\n");
}
__global__ void __msum(double *A, int lda, double *B, int ldb, double *C, int ldc, int d, int nrows, int ncols, double p) {
printf("Warning, Max-sum multiply not supported on arch <= 200\n");
}
#endif
int dists(double *A, int lda, double *B, int ldb, double *C, int ldc, int d, int nrows, int ncols, double p) {
dim3 blockdim(32,4,4);
dim3 griddim(1,1+(nrows-1)/128,1+(ncols-1)/128);
// hipSetDevice(ithread);
if (p == 0.0f) {
hipLaunchKernelGGL(( __linfdist), dim3(griddim),dim3(blockdim), 0, 0, A, lda, B, ldb, C, ldc, d, nrows, ncols, p);
} else if (p == 1.0f) {
hipLaunchKernelGGL(( __l1dist), dim3(griddim),dim3(blockdim), 0, 0, A, lda, B, ldb, C, ldc, d, nrows, ncols, p);
} else if (p == 2.0f) {
hipLaunchKernelGGL(( __l2dist), dim3(griddim),dim3(blockdim), 0, 0, A, lda, B, ldb, C, ldc, d, nrows, ncols, p);
} else {
hipLaunchKernelGGL(( __minkowskidist), dim3(griddim),dim3(blockdim), 0, 0, A, lda, B, ldb, C, ldc, d, nrows, ncols, p);
}
hipStreamSynchronize(SYNC_STREAM);
hipError_t err = hipGetLastError();
return err;
}
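// The GENDISTS kernels keep a 32x32 tile of C in registers (R00..R31, one
// per lane-rotation step) and stream the d feature rows of A and B through
// warp shuffles, applying DFUNC as the per-element update; blockDim.x must be
// the 32-lane warp width for the shuffles to rotate correctly. dists
// dispatches on p: 0 -> L-infinity, 1 -> L1, 2 -> L2 (left squared; callers
// take the square root if a true Euclidean distance is needed), anything
// else -> Minkowski with exponent p.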
int maxsumx(double *A, int lda, double *B, int ldb, double *C, int ldc, int d, int nrows, int ncols) {
dim3 blockdim(32,4,4);
dim3 griddim(1,1+(nrows-1)/128,1+(ncols-1)/128);
hipLaunchKernelGGL(( __msum), dim3(griddim),dim3(blockdim), 0, 0, A, lda, B, ldb, C, ldc, d, nrows, ncols, 0);
hipStreamSynchronize(SYNC_STREAM);
hipError_t err = hipGetLastError();
return err;
}
#if __CUDA_ARCH__ > 200
template<class T>
__global__ void __cumsumg(T *in, T *out, int *jc, int nrows, int ncols, int m) {
__shared__ T tots[32];
int start, end, ij;
  int bid = blockIdx.y + blockIdx.z * gridDim.y;  // column index
T sum, tsum, tmp, ttot, ttot0;
if (bid < ncols) {
for (ij = blockIdx.x; ij < m; ij += gridDim.x) {
start = jc[ij] + bid * nrows;
end = jc[ij+1] + bid * nrows;
sum = 0;
for (int i = start + threadIdx.x + threadIdx.y * blockDim.x; i < end; i += blockDim.x * blockDim.y) {
tsum = in[i];
tmp = __shfl_up(tsum, 1);
if (threadIdx.x >= 1) tsum += tmp;
tmp = __shfl_up(tsum, 2);
if (threadIdx.x >= 2) tsum += tmp;
tmp = __shfl_up(tsum, 4);
if (threadIdx.x >= 4) tsum += tmp;
tmp = __shfl_up(tsum, 8);
if (threadIdx.x >= 8) tsum += tmp;
tmp = __shfl_up(tsum, 16);
if (threadIdx.x >= 16) tsum += tmp;
ttot = __shfl(tsum, min(end-start-1, 31));
ttot0 = ttot;
__syncthreads();
if (threadIdx.x == threadIdx.y) {
tots[threadIdx.y] = ttot;
}
__syncthreads();
for (int k = 1; k < blockDim.y; k *= 2) {
if (threadIdx.y >= k) {
if (threadIdx.x == threadIdx.y - k) {
ttot += tots[threadIdx.x];
}
}
__syncthreads();
if (threadIdx.y >= k) {
ttot = __shfl(ttot, threadIdx.y - k);
if (threadIdx.x == threadIdx.y) {
tots[threadIdx.y] = ttot;
}
}
__syncthreads();
}
out[i] = sum + tsum + ttot - ttot0;
if (threadIdx.x == blockDim.y - 1) {
ttot = tots[threadIdx.x];
}
__syncthreads();
ttot = __shfl(ttot, blockDim.y - 1);
sum += ttot;
}
}
}
}
template<class T>
__global__ void __maxming(T *in, T *out, int *outi, int *jc, int nrows, int ncols, int m, T maxminv, int dir) {
__shared__ T maxv[32];
__shared__ int maxi[32];
T vmax, vtmp;
int imax, itmp, i, k, start, end, ij;
int bid = blockIdx.y + blockIdx.z * gridDim.y;
if (bid < ncols) {
for (ij = blockIdx.x; ij < m; ij += gridDim.x) {
vmax = maxminv;
imax = -1;
start = jc[ij];
end = jc[ij+1];
for (i = start + threadIdx.x + threadIdx.y * blockDim.x; i < end; i += blockDim.x * blockDim.y) {
vtmp = in[i + nrows * bid];
itmp = i;
if (dir ? (vtmp > vmax) : (vtmp < vmax)) {
vmax = vtmp;
imax = itmp;
}
}
for (k = 1; k < blockDim.x; k *= 2) {
vtmp = __shfl_up(vmax, k);
itmp = __shfl_up(imax, k);
if (threadIdx.x >= k) {
if (dir ? (vtmp > vmax) : (vtmp < vmax)) {
vmax = vtmp;
imax = itmp;
}
}
}
vmax = __shfl(vmax, blockDim.x - 1);
imax = __shfl(imax, blockDim.x - 1);
__syncthreads();
if (threadIdx.x == threadIdx.y) {
maxv[threadIdx.y] = vmax;
maxi[threadIdx.y] = imax;
}
__syncthreads();
if (threadIdx.y == 0) {
vmax = maxv[threadIdx.x];
imax = maxi[threadIdx.x];
}
__syncthreads();
if (threadIdx.y == 0) {
for (k = 1; k < blockDim.y; k *= 2) {
vtmp = __shfl_up(vmax, k);
itmp = __shfl_up(imax, k);
if (threadIdx.x >= k) {
if (dir ? (vtmp > vmax) : (vtmp < vmax)) {
vmax = vtmp;
imax = itmp;
}
}
}
if (threadIdx.x == blockDim.y - 1) {
out[ij + m * bid] = vmax;
outi[ij + m * bid] = imax;
}
}
}
}
}
template<class T>
__global__ void __maxmini_cols(T *in, T *out, int *outi, int nrows, int ncols, T maxminv, int dir) {
__shared__ T maxv[32];
__shared__ int maxi[32];
T vmax, vtmp;
int imax, itmp, i, k;
int bid = blockIdx.x + blockIdx.y * gridDim.x;
if (bid < ncols) {
vmax = maxminv;
imax = -1;
for (i = threadIdx.x + threadIdx.y * blockDim.x; i < nrows; i += blockDim.x * blockDim.y) {
vtmp = in[i + nrows * bid];
itmp = i;
if (dir ? (vtmp > vmax) : (vtmp < vmax)) {
vmax = vtmp;
imax = itmp;
}
}
for (k = 1; k < blockDim.x; k *= 2) {
vtmp = __shfl_up(vmax, k);
itmp = __shfl_up(imax, k);
if (threadIdx.x >= k) {
if (dir ? (vtmp > vmax) : (vtmp < vmax)) {
vmax = vtmp;
imax = itmp;
}
}
}
vmax = __shfl(vmax, blockDim.x - 1);
imax = __shfl(imax, blockDim.x - 1);
__syncthreads();
if (threadIdx.x == threadIdx.y) {
maxv[threadIdx.y] = vmax;
maxi[threadIdx.y] = imax;
}
__syncthreads();
if (threadIdx.y == 0) {
vmax = maxv[threadIdx.x];
imax = maxi[threadIdx.x];
}
__syncthreads();
if (threadIdx.y == 0) {
for (k = 1; k < blockDim.y; k *= 2) {
vtmp = __shfl_up(vmax, k);
itmp = __shfl_up(imax, k);
if (threadIdx.x >= k) {
if (dir ? (vtmp > vmax) : (vtmp < vmax)) {
vmax = vtmp;
imax = itmp;
}
}
}
if (threadIdx.x == blockDim.y - 1) {
out[bid] = vmax;
outi[bid] = imax;
}
}
__syncthreads();
}
}
// Not very fast for wide matrices
template<class T>
__global__ void __maxmini_rows(T *in, T *out, int *outi, int nrows, int ncols, int dir) {
T vmax, vtmp;
int imax, itmp, i, j;
for (i = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * blockIdx.x); i < nrows; i += blockDim.x * blockDim.y * gridDim.x) {
if (ncols > 0) {
vmax = in[i];
imax = 0;
for (j = 1; j < ncols; j++) {
vtmp = in[i + nrows * j];
itmp = j;
if (dir ? (vtmp > vmax) : (vtmp < vmax)) {
vmax = vtmp;
imax = itmp;
}
}
out[i] = vmax;
outi[i] = imax;
}
}
}
#else
template<class T>
__global__ void __cumsumg(T *in, T *out, int *jc, int nrows, int ncols, int m) {}
template<class T>
__global__ void __maxming(T *in, T *out, int *outi, int *jc, int nrows, int ncols, int m, T minv, int dir) {}
template<class T>
__global__ void __maxmini_cols(T *in, T *out, int *outi, int nrows, int ncols, T minv, int dir) {}
template<class T>
__global__ void __maxmini_rows(T *in, T *out, int *outi, int nrows, int ncols, int dir) {}
#endif
void setindsD(int ncols, int &nc1, int &nc2) {
if (ncols < 65536) {
nc1 = ncols;
nc2 = 1;
} else {
nc1 = (int)sqrt((double)ncols);
nc2 = 1 + (ncols-1)/nc1;
}
}
template<class T>
int cumsumg(T *in, T *out, int *jc, int nrows, int ncols, int m) {
int nc1, nc2;
setindsD(ncols, nc1, nc2);
dim3 grid(min(64, m), nc1, nc2);
int ny = min(32, 1+nrows/m/32);
dim3 tblock(32, ny, 1);
hipLaunchKernelGGL(( __cumsumg<T>), dim3(grid),dim3(tblock), 0, 0, in, out, jc, nrows, ncols, m);
hipStreamSynchronize(SYNC_STREAM);
hipError_t err = hipGetLastError();
return err;
}
int cumsumgf(double *in, double *out, int *jc, int nrows, int ncols, int m) {
return cumsumg<double>(in, out, jc, nrows, ncols, m);
}
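// cumsumg performs an inclusive prefix sum independently over each segment
// [jc[ij], jc[ij+1]) of every column of `in`: warp shuffles build 32-element
// partial scans, the shared `tots` ladder combines the block's warps, and
// `sum` carries the running total across successive 32 * blockDim.y chunks
// of a segment. Illustrative call over m segments of an nrows x ncols matrix
// (d_jc is a device array of m+1 segment boundaries):
//
//   cumsumgf(d_in, d_out, d_jc, nrows, ncols, m);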
template<class T>
int maxming(T *in, T *out, int *outi, int *jc, int nrows, int ncols, int m, T minv, int dir) {
int nc1, nc2;
setindsD(ncols, nc1, nc2);
dim3 grid(min(64, m), nc1, nc2);
int ny = min(32, 1+nrows/m/32);
dim3 tblock(32, ny, 1);
hipLaunchKernelGGL(( __maxming<T>), dim3(grid),dim3(tblock), 0, 0, in, out, outi, jc, nrows, ncols, m, minv, dir);
hipStreamSynchronize(SYNC_STREAM);
hipError_t err = hipGetLastError();
return err;
}
// JFC: problem here when ncols is not a multiple of 16 and nrows < 32.
template<class T>
int maxmini_cols(T *in, T *out, int *outi, int nrows, int ncols, T minv, int dir) {
int nc1, nc2;
setindsD(ncols, nc1, nc2);
dim3 grid(nc1, nc2, 1);
int ny = min(32, 1+nrows/32);
dim3 tblock(32, ny, 1);
hipLaunchKernelGGL(( __maxmini_cols<T>), dim3(grid),dim3(tblock), 0, 0, in, out, outi, nrows, ncols, minv, dir);
hipStreamSynchronize(SYNC_STREAM);
hipError_t err = hipGetLastError();
return err;
}
template<class T>
int maxmini_rows(T *in, T *out, int *outi, int nrows, int ncols, int dir) {
int nb = min(32,1+nrows/32);
dim3 grid(nb,1,1);
int ny = min(32, 1+nrows/nb/32);
dim3 tblock(32, ny, 1);
hipLaunchKernelGGL(( __maxmini_rows<T>), dim3(grid),dim3(tblock), 0, 0, in, out, outi, nrows, ncols, dir);
hipStreamSynchronize(SYNC_STREAM);
hipError_t err = hipGetLastError();
return err;
}
int maxgf(double *in, double *out, int *outi, int *jc, int nrows, int ncols, int m) {
  return maxming<double>(in, out, outi, jc, nrows, ncols, m, -1.7e308, 1);
}
int mingf(double *in, double *out, int *outi, int *jc, int nrows, int ncols, int m) {
  return maxming<double>(in, out, outi, jc, nrows, ncols, m, 1.7e308, 0);
}
int maxif(double *in, double *out, int *outi, int nrows, int ncols, int dir) {
if (dir == 1) {
    return maxmini_cols<double>(in, out, outi, nrows, ncols, -1.7e308, 1);
} else if (dir == 2) {
return maxmini_rows<double>(in, out, outi, nrows, ncols, 1);
} else {
return -1;
}
}
int minif(double *in, double *out, int *outi, int nrows, int ncols, int dir) {
if (dir == 1) {
    return maxmini_cols<double>(in, out, outi, nrows, ncols, 1.7e308, 0);
} else if (dir == 2) {
return maxmini_rows<double>(in, out, outi, nrows, ncols, 0);
} else {
return -1;
}
}
__global__ void __dmv(double *a, int nrows, int ncols, double *b, double *c) {
for (int tx = threadIdx.x + blockDim.x * blockIdx.x; tx < nrows; tx += blockDim.x * gridDim.x) {
double accum = 0.0;
for (int ty = threadIdx.y + blockDim.y * blockIdx.y; ty < ncols; ty += blockDim.y * gridDim.y) {
accum += a[tx+nrows*ty] * b[ty];
}
atomicAdd(&c[tx], accum);
}
}
#if __CUDA_ARCH__ > 200
__global__ void __dmvt(double *a, int nrows, int ncols, double *b, double *c) {
for (int ty = threadIdx.y + blockDim.y * blockIdx.y; ty < ncols; ty += blockDim.y * gridDim.y) {
    double accum = 0.0;
for (int tx = threadIdx.x + blockDim.x * blockIdx.x; tx < nrows; tx += blockDim.x * gridDim.x) {
accum += a[tx+nrows*ty] * b[tx];
}
for (int i = 1; i < blockDim.x; i *= 2) {
double tmp = __shfl_down(accum, i);
if (threadIdx.x + i < blockDim.x) accum += tmp;
}
if (threadIdx.x == 0) {
atomicAdd(&c[ty], accum);
}
}
}
#else
__global__ void __dmvt(double *a, int nrows, int ncols, double *b, double *c) {
for (int ty = threadIdx.y + blockDim.y * blockIdx.y; ty < ncols; ty += blockDim.y * gridDim.y) {
double accum = 0.0;
for (int tx = threadIdx.x + blockDim.x * blockIdx.x; tx < nrows; tx += blockDim.x * gridDim.x) {
accum += a[tx+nrows*ty] * b[tx];
}
atomicAdd(&c[ty], accum);
}
}
#endif
__global__ void __dmv0(double *a, int nrows, int ncols, int tstep, double *b, double *c) {
  double accum = 0.0;
int tx = threadIdx.x + blockDim.x * blockIdx.x;
if (tx < tstep) {
for (; tx < nrows*ncols; tx += tstep) {
int icol = tx / nrows;
accum += a[tx] * b[icol];
}
int irow = tx % nrows;
atomicAdd(&c[irow], accum);
}
}
int dmv(double *a, int nrows, int ncols, double *b, double *c, int trans) {
if (trans == 1) {
int ntx = min(32, nrows);
int nty = min(32, ncols);
int nbx = min(256, 1 + nrows/ntx/8);
int nby = min(256, 1 + ncols/nty/2);
dim3 blockdims(ntx,nty,1);
dim3 griddims(nbx,nby,1);
hipLaunchKernelGGL(( __dmvt), dim3(griddims),dim3(blockdims), 0, 0, a, nrows, ncols, b, c);
} else {
int ntx = min(1024, nrows*ncols);
int nbx = max(1+(nrows-1)/ntx, nrows*ncols/ntx/32);
int tstep = (ntx*nbx/nrows)*nrows;
hipLaunchKernelGGL(( __dmv0), dim3(nbx),dim3(ntx), 0, 0, a, nrows, ncols, tstep, b, c);
}
hipStreamSynchronize(SYNC_STREAM);
hipError_t err = hipGetLastError();
return err;
}
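// Editor's sketch (not part of the original API; all names hypothetical):
// minimal host-side use of dmv() computing c = A*b for a column-major 3x2
// matrix. c must start zeroed, since the kernels accumulate with atomicAdd.
void example_dmv() {
  const int m = 3, n = 2;
  double hA[m*n] = {1, 2, 3, 4, 5, 6};   // column-major: A = [1 4; 2 5; 3 6]
  double hb[n] = {1, 10};
  double hc[m] = {0, 0, 0};
  double *dA, *db, *dc;
  hipMalloc((void**)&dA, m*n*sizeof(double));
  hipMalloc((void**)&db, n*sizeof(double));
  hipMalloc((void**)&dc, m*sizeof(double));
  hipMemcpy(dA, hA, m*n*sizeof(double), hipMemcpyHostToDevice);
  hipMemcpy(db, hb, n*sizeof(double), hipMemcpyHostToDevice);
  hipMemcpy(dc, hc, m*sizeof(double), hipMemcpyHostToDevice);
  dmv(dA, m, n, db, dc, 0);              // trans == 0: c += A*b, so c = {41, 52, 63}
  hipMemcpy(hc, dc, m*sizeof(double), hipMemcpyDeviceToHost);
  hipFree(dA); hipFree(db); hipFree(dc);
}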
#define ACCUM_KERNEL(TI,TJ,TV,TS,II,IJ,IV) \
__global__ void __accum(TI, TJ, TV, TS, int m, int nrows) { \
int istart = ((int)(((long long)blockIdx.x) * m / gridDim.x)); \
int iend = ((int)(((long long)blockIdx.x + 1) * m / gridDim.x)); \
istart = (istart / 32) * 32; \
if (blockIdx.x != gridDim.x - 1) { \
iend = (iend / 32) * 32; \
} \
for (int i = istart + threadIdx.x; i < iend; i+= blockDim.x) { \
atomicAdd(&S[II + nrows * IJ], IV); \
} \
} \
int accum(TI, TJ, TV, TS, int m, int nrows) { \
int nthreads = max(32, min(512, m)); \
int nblocks = max(1, min(65535, m/nthreads/8)); \
hipLaunchKernelGGL(( __accum), dim3(nblocks),dim3(nthreads), 0, 0, I,J,V,S,m,nrows); \
hipStreamSynchronize(SYNC_STREAM); \
hipError_t err = hipGetLastError(); \
return err; \
}
ACCUM_KERNEL(int*I, int*J, double*V, double*S, I[i], J[i], V[i])
ACCUM_KERNEL(int*I, int J, double*V, double*S, I[i], J, V[i])
ACCUM_KERNEL(int I, int*J, double*V, double*S, I, J[i], V[i])
ACCUM_KERNEL(int*I, int*J, double V, double*S, I[i], J[i], V)
ACCUM_KERNEL(int*I, int J, double V, double*S, I[i], J, V)
ACCUM_KERNEL(int I, int*J, double V, double*S, I, J[i], V)
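// Editor's sketch (hypothetical, not in the original file): accum() scatter-adds
// m (row, column, value) triples into a dense nrows x ncols matrix, i.e.
// S[I[i], J[i]] += V[i]; duplicates accumulate atomically. S must be pre-zeroed.
void example_accum() {
  const int nrows = 2, ncols = 2, m = 3;
  int hI[m] = {0, 1, 0};
  int hJ[m] = {0, 1, 0};
  double hV[m] = {1.0, 2.0, 3.0};
  int *dI, *dJ; double *dV, *dS;
  hipMalloc((void**)&dI, m*sizeof(int));
  hipMalloc((void**)&dJ, m*sizeof(int));
  hipMalloc((void**)&dV, m*sizeof(double));
  hipMalloc((void**)&dS, nrows*ncols*sizeof(double));
  hipMemcpy(dI, hI, m*sizeof(int), hipMemcpyHostToDevice);
  hipMemcpy(dJ, hJ, m*sizeof(int), hipMemcpyHostToDevice);
  hipMemcpy(dV, hV, m*sizeof(double), hipMemcpyHostToDevice);
  hipMemset(dS, 0, nrows*ncols*sizeof(double));
  accum(dI, dJ, dV, dS, m, nrows);       // S[0,0] = 1+3 = 4, S[1,1] = 2
  hipFree(dI); hipFree(dJ); hipFree(dV); hipFree(dS);
}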
const int INBLOCK = 4;
// Copy and transpose columns of the input matrix into the output matrix. nrows refers to the input
// matrix (and is therefore the number of columns of the output). ncols is the length of the iptrs
// array, which becomes the number of rows of the output matrix. iptrs specifies which columns of the
// input array to copy; outstride is the stride of the output matrix. (A usage sketch follows the
// icopy_transpose wrapper below.)
__global__ void __icopy_transpose(int *iptrs, double *in, double *out, int outstride, int nrows, int ncols) {
__shared__ double tile[BLOCKDIM][BLOCKDIM+1];
int nx = BLOCKDIM * gridDim.x;
int ny = BLOCKDIM * gridDim.y;
int ix = BLOCKDIM * blockIdx.x;
int iy = BLOCKDIM * blockIdx.y;
for (int yb = iy; yb < ncols; yb += ny) {
for (int xb = ix; xb < nrows; xb += nx) {
if (xb + threadIdx.x < nrows) {
int ylim = min(ncols, yb + BLOCKDIM);
for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) {
tile[threadIdx.x][y-yb] = in[threadIdx.x + xb + iptrs[y]*nrows];
}
}
__syncthreads();
if (yb + threadIdx.x < ncols) {
int xlim = min(nrows, xb + BLOCKDIM);
for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) {
out[threadIdx.x + yb + x*outstride] = tile[x-xb][threadIdx.x];
}
}
__syncthreads();
}
}
}
int icopy_transpose(int *iptrs, double *in, double *out, int stride, int nrows, int ncols) {
const dim3 griddims(20,256,1);
const dim3 blockdims(BLOCKDIM,INBLOCK,1);
hipError_t err;
hipLaunchKernelGGL(( __icopy_transpose), dim3(griddims),dim3(blockdims), 0, 0, iptrs, in, out, stride, nrows, ncols);
hipStreamSynchronize(SYNC_STREAM);
err = hipGetLastError();
  if (err != hipSuccess) {fprintf(stderr, "cuda error in icopy_transpose\n"); return err;}
return 0;
}
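// Editor's sketch (hypothetical): gather the columns listed in iptrs from a
// column-major nrows x * input and store them transposed; the packed output
// has ncols rows, so its stride is ncols.
void example_icopy_transpose(int *diptrs, double *din, double *dout, int nrows, int ncols) {
  icopy_transpose(diptrs, din, dout, ncols, nrows, ncols);
}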
// copy and transpose the input matrix into columns of the output matrix. nrows, ncols refer to output matrix
__global__ void __ocopy_transpose(int *optrs, double *in, double *out, int instride, int nrows, int ncols) {
int nx = BLOCKDIM * gridDim.x;
int ny = BLOCKDIM * gridDim.y;
int ix = BLOCKDIM * blockIdx.x;
int iy = BLOCKDIM * blockIdx.y;
__shared__ double tile[BLOCKDIM][BLOCKDIM+1];
for (int yb = iy; yb < ncols; yb += ny) {
for (int xb = ix; xb < nrows; xb += nx) {
if (yb + threadIdx.x < ncols) {
int xlim = min(nrows, xb + BLOCKDIM);
for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) {
tile[x-xb][threadIdx.x] = in[threadIdx.x + yb + x*instride];
}
}
__syncthreads();
if (xb + threadIdx.x < nrows) {
int ylim = min(ncols, yb + BLOCKDIM);
for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) {
out[optrs[y]*nrows + threadIdx.x + xb] = tile[threadIdx.x][y-yb];
}
}
__syncthreads();
}
}
}
__global__ void __ocopy_transpose_add(int *optrs, double *in, double *out, int instride, int nrows, int ncols) {
int nx = BLOCKDIM * gridDim.x;
int ny = BLOCKDIM * gridDim.y;
int ix = BLOCKDIM * blockIdx.x;
int iy = BLOCKDIM * blockIdx.y;
__shared__ double tile[BLOCKDIM][BLOCKDIM+1];
for (int yb = iy; yb < ncols; yb += ny) {
for (int xb = ix; xb < nrows; xb += nx) {
if (yb + threadIdx.x < ncols) {
int xlim = min(nrows, xb + BLOCKDIM);
for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) {
tile[x-xb][threadIdx.x] = in[threadIdx.x + yb + x*instride];
}
}
__syncthreads();
if (xb + threadIdx.x < nrows) {
int ylim = min(ncols, yb + BLOCKDIM);
for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) {
atomicAdd(&out[optrs[y]*nrows + threadIdx.x + xb], tile[threadIdx.x][y-yb]);
}
}
__syncthreads();
}
}
}
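// Editor's addition (hypothetical helper): the min-transpose kernel below
// originally applied the 32-bit integer atomicMin bit-trick directly to
// doubles, which compares only half of each value. This CAS loop is a full
// 64-bit double atomicMin sketch; like the original trick, NaN handling is
// left undefined.
__device__ double __atomicMinDouble(double *address, double val) {
  unsigned long long int *address_as_ull = (unsigned long long int *)address;
  unsigned long long int old = *address_as_ull, assumed;
  do {
    assumed = old;
    if (__longlong_as_double(assumed) <= val) break;   // already small enough
    old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val));
  } while (assumed != old);
  return __longlong_as_double(old);
}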
__global__ void __ocopy_transpose_min(int *optrs, double *in, double *out, int instride, int nrows, int ncols) {
int nx = BLOCKDIM * gridDim.x;
int ny = BLOCKDIM * gridDim.y;
int ix = BLOCKDIM * blockIdx.x;
int iy = BLOCKDIM * blockIdx.y;
__shared__ double tile[BLOCKDIM][BLOCKDIM+1];
for (int yb = iy; yb < ncols; yb += ny) {
for (int xb = ix; xb < nrows; xb += nx) {
if (yb + threadIdx.x < ncols) {
int xlim = min(nrows, xb + BLOCKDIM);
for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) {
tile[x-xb][threadIdx.x] = in[threadIdx.x + yb + x*instride];
}
}
__syncthreads();
if (xb + threadIdx.x < nrows) {
int ylim = min(ncols, yb + BLOCKDIM);
for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) {
          __atomicMinDouble(&out[optrs[y]*nrows + threadIdx.x + xb], tile[threadIdx.x][y-yb]);
}
}
__syncthreads();
}
}
}
int ocopy_transpose_add(int *optrs, double *in, double *out, int stride, int nrows, int ncols) {
const dim3 griddims(20,256,1);
const dim3 blockdims(BLOCKDIM,INBLOCK,1);
hipError_t err;
hipLaunchKernelGGL(( __ocopy_transpose_add), dim3(griddims),dim3(blockdims), 0, 0, optrs, in, out, stride, nrows, ncols);
hipStreamSynchronize(SYNC_STREAM);
err = hipGetLastError();
  if (err != hipSuccess) {fprintf(stderr, "cuda error in ocopy_transpose_add\n"); return err;}
return 0;
}
int ocopy_transpose(int *optrs, double *in, double *out, int stride, int nrows, int ncols) {
const dim3 griddims(20,256,1);
const dim3 blockdims(BLOCKDIM,INBLOCK,1);
hipError_t err;
hipLaunchKernelGGL(( __ocopy_transpose), dim3(griddims),dim3(blockdims), 0, 0, optrs, in, out, stride, nrows, ncols);
hipStreamSynchronize(SYNC_STREAM);
err = hipGetLastError();
  if (err != hipSuccess) {fprintf(stderr, "cuda error in ocopy_transpose\n"); return err;}
return 0;
}
int ocopy_transpose_min(int *optrs, double *in, double *out, int stride, int nrows, int ncols) {
const dim3 griddims(20,256,1);
const dim3 blockdims(BLOCKDIM,INBLOCK,1);
hipError_t err;
hipLaunchKernelGGL(( __ocopy_transpose_min), dim3(griddims),dim3(blockdims), 0, 0, optrs, in, out, stride, nrows, ncols);
hipStreamSynchronize(SYNC_STREAM);
err = hipGetLastError();
  if (err != hipSuccess) {fprintf(stderr, "cuda error in ocopy_transpose_min\n"); return err;}
return 0;
}
#ifdef TEST
int main(int argc, char **argv) {
int m=8, n=8, opn = 0;
double *dA, *dB, *dC, *A, *B, *C;
if (argc > 1) {
sscanf(argv[1], "%d", &opn);
if (argc > 2) {
sscanf(argv[2], "%d", &m);
if (argc > 3) {
sscanf(argv[3], "%d", &n);
}
}
}
A = (double *)malloc(m*n*sizeof(double));
B = (double *)malloc(m*n*sizeof(double));
C = (double *)malloc(m*n*sizeof(double));
hipMalloc((void**)&dA, m*n*sizeof(double));
hipMalloc((void**)&dB, m*n*sizeof(double));
hipMalloc((void**)&dC, m*n*sizeof(double));
for (int i = 0; i < m*n; i++) {
A[i] = 1.0f;
B[i] = 2.0f;
}
hipMemcpy(dA, A, m*n*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dB, B, m*n*sizeof(double), hipMemcpyHostToDevice);
printf("A %f %f %f %f\n", A[0], A[1], A[2], A[3]);
printf("B %f %f %f %f\n", B[0], B[1], B[2], B[3]);
MatKernel(dA, m, n, dB, m, n, dC, opn);
hipError_t err = hipGetLastError();
if( hipSuccess != err) {
fprintf(stderr, "CUDA error %d", err);
exit(1);
}
hipMemcpy(C, dC, m*n*sizeof(double), hipMemcpyDeviceToHost);
printf("C %f %f %f %f\n", C[0], C[1], C[2], C[3]);
printf("A %f %f %f %f\n", A[0], A[1], A[2], A[3]);
printf("B %f %f %f %f\n", B[0], B[1], B[2], B[3]);
if (dA != NULL) hipFree(dA);
if (dB != NULL) hipFree(dB);
if (dC != NULL) hipFree(dC);
  if (A != NULL) free(A);
  if (B != NULL) free(B);
  if (C != NULL) free(C);
}
#endif
// Cumulative sum of columns
#if __CUDA_ARCH__ >= 300
__global__ void __cumsumc(int nrows, int ncols, double *A, double *B) {
int i, j, k, lim;
double v, w, sum;
int icol = threadIdx.y + blockDim.y * blockIdx.x;
__syncthreads();
for (i = icol; i < ncols; i += blockDim.y * gridDim.x) {
    sum = 0.0;
for (j = 0; j < nrows; j += blockDim.x) {
v = 0;
if (j + threadIdx.x < nrows) {
v = A[j + threadIdx.x + i * nrows];
}
lim = min(blockDim.x, nrows - j);
#pragma unroll
for (k = 1; k < lim; k = k + k) {
w = __shfl_up(v, k);
if (threadIdx.x >= k) {
v += w;
}
}
v += sum;
if (j + threadIdx.x < nrows) {
B[j + threadIdx.x + i * nrows] = v;
}
sum = __shfl(v, blockDim.x - 1);
}
}
}
#else
__global__ void __cumsumc(int nrows, int ncols, double *A, double *B) {
__shared__ double buff[32];
int i, j, k, lim;
double v, sum;
int icol = threadIdx.y + blockDim.y * blockIdx.x;
__syncthreads();
for (i = icol; i < ncols; i += blockDim.y * gridDim.x) {
    sum = 0.0;
for (j = 0; j < nrows; j += blockDim.x) {
v = 0;
if (j + threadIdx.x < nrows) {
v = A[j + threadIdx.x + i * nrows];
}
__syncthreads();
buff[threadIdx.x] = v;
lim = min(blockDim.x, nrows - j);
#pragma unroll
for (k = 1; k < lim; k = k + k) {
__syncthreads();
if (threadIdx.x >= k) {
v += buff[threadIdx.x - k];
}
__syncthreads();
buff[threadIdx.x] = v;
}
v += sum;
if (j + threadIdx.x < nrows) {
B[j + threadIdx.x + i * nrows] = v;
}
__syncthreads();
sum = buff[31];
__syncthreads();
}
}
}
#endif
int cumsumc(int nrows, int ncols, double *A, double *B) {
if (ncols == 1) {
thrust::device_ptr<double> pa(A);
thrust::device_ptr<double> pb(B);
thrust::inclusive_scan(pa, pa + nrows, pb);
} else {
dim3 threads;
threads.x = 32;
threads.y = min(32, ncols);
int nblocks = min(64, 1 + (ncols-1)/threads.y);
hipLaunchKernelGGL(( __cumsumc), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, A, B);
}
hipStreamSynchronize(SYNC_STREAM);
hipError_t err = hipGetLastError();
return err;
}
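// Editor's sketch (hypothetical): column-wise inclusive prefix sums. For a
// 3x2 column-major input {1,2,3, 4,5,6}, B becomes {1,3,6, 4,9,15}.
void example_cumsumc(double *dA, double *dB, int nrows, int ncols) {
  cumsumc(nrows, ncols, dA, dB);         // B[i,j] = sum of A[0..i, j]
}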
int inclusive_scan_by_key_dd(double *fvals, double *fkeys, double *fout, long long len) {
thrust::device_ptr<double> vals(fvals);
thrust::device_ptr<double> keys(fkeys);
thrust::device_ptr<double> out(fout);
thrust::inclusive_scan_by_key(keys, keys+len, vals, out);
hipStreamSynchronize(SYNC_STREAM);
hipError_t err = hipGetLastError();
return err;
}
int inclusive_scan_by_key_ll(long long *fvals, long long *fkeys, long long *fout, long long len) {
thrust::device_ptr<long long> vals(fvals);
thrust::device_ptr<long long> keys(fkeys);
thrust::device_ptr<long long> out(fout);
thrust::inclusive_scan_by_key(keys, keys+len, vals, out);
hipStreamSynchronize(SYNC_STREAM);
hipError_t err = hipGetLastError();
return err;
}
int reverse(double *fvals, double *fout, long long len) {
thrust::device_ptr<double> vals(fvals);
thrust::device_ptr<double> out(fout);
thrust::reverse_copy(vals, vals+len, out);
hipStreamSynchronize(SYNC_STREAM);
hipError_t err = hipGetLastError();
return err;
}
|
7fe7aa7547fcebe5ebe21a27e07e55cccd0fec4e.cu
|
#include <cuda_runtime.h>
#include <stdio.h>
#include <MatKernelD.hpp>
#include <thrust/sort.h>
//#include <cub/device/device_radix_sort.cuh>
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
#else
__device__ double atomicAdd(double* address, double val)
{
unsigned long long int* address_as_ull =
(unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
#if __CUDA_ARCH__ > 200
#define MAXXGRID 2147483647
#else
#define MAXXGRID 65535
#endif
int getDeviceVersionD() {
int igpu;
cudaGetDevice(&igpu);
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, igpu);
return 100 * prop.major + 10 * prop.minor;
}
void setsizesD(long long N, dim3 *gridp, int *nthreadsp) {
int nblocks = 1;
int nthreads = 32;
int threads_per_block = 1024;
// int version;
// version = getDeviceVersionD();
// if (version == 320) threads_per_block = 512;
while (1L * nblocks * nthreads < N) {
if (nblocks < 16) {
nblocks = 2*nblocks;
} else if (nthreads < threads_per_block) {
nthreads = 2*nthreads;
} else {
nblocks = 2*nblocks;
}
}
gridp->y = 1 + (nblocks-1)/65536;
gridp->x = 1 + (nblocks-1)/gridp->y;
gridp->z = 1;
*nthreadsp = nthreads;
}
void setsizesLeanD(long long N, dim3 *gridp, int *nthreadsp) {
int nblocks = 1;
int nthreads = 32;
int threads_per_block = 1024;
// int version;
// version = getDeviceVersionD();
// if (version == 320) threads_per_block = 512;
while (1L * nblocks * nthreads < N) {
if (nblocks < 16) {
nblocks = 2*nblocks;
} else if (nthreads < threads_per_block) {
nthreads = 2*nthreads;
} else {
nblocks = max(nblocks, 1 + (int)((N-1)/nthreads));
}
}
gridp->y = 1 + (nblocks-1)/65536;
gridp->x = 1 + (nblocks-1)/gridp->y;
gridp->z = 1;
*nthreadsp = nthreads;
}
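// Editor's sketch (hypothetical): how the heuristic above unfolds for
// N = 1<<20. nblocks doubles up to 16, nthreads doubles up to 1024
// (16*1024 < N), then nblocks jumps to 1 + (N-1)/1024 = 1024, giving one
// element per thread.
void example_setsizes() {
  dim3 grid; int nthreads;
  setsizesLeanD(1 << 20, &grid, &nthreads);
  printf("grid=(%d,%d,%d) threads=%d\n", (int)grid.x, (int)grid.y, (int)grid.z, nthreads);
}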
template <class T>
__global__ void __toDouble(T *A, double *B, int N) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) {
B[i] = (double)(A[i]);
}
}
__global__ void __toInt(double *A, int *B, int N) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) {
B[i] = (int)(A[i]);
}
}
int IntToDouble(int *A, double *B, int N) {
int nthreads;
dim3 griddims;
setsizesLeanD(N, &griddims, &nthreads);
__toDouble<int><<<griddims,nthreads>>>(A, B, N);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
int FloatToDouble(float *A, double *B, int N) {
int nthreads;
dim3 griddims;
setsizesLeanD(N, &griddims, &nthreads);
__toDouble<float><<<griddims,nthreads>>>(A, B, N);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
int toInt(double *A, int *B, int N) {
int nthreads;
dim3 griddims;
setsizesLeanD(N, &griddims, &nthreads);
__toInt<<<griddims,nthreads>>>(A, B, N);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __full(int *ir, int *ic, double *data, double *od, int nrows, int ncols, int nnz) {
int i, row, col;
double v;
int id = threadIdx.x + blockIdx.x * blockDim.x;
for (i = id; i < nnz; i += blockDim.x * gridDim.x) {
v = data[i];
row = ir[i];
col = ic[i];
od[row + col * nrows] = v;
}
}
int full(int *ir, int *ic, double *data, double *od, int nrows, int ncols, int nnz) {
int nblocks = min(32, 1+(nnz-1)/32);
int nthreads = max(32, min(1+(nnz-1)/nblocks, 1024));
__full<<<nblocks,nthreads>>>(ir, ic, data, od, nrows, ncols, nnz);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __set_val(double *A, double val, int length) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < length; i += blockDim.x * gridDim.x * gridDim.y) {
A[i] = val;
}
}
int set_val(double *A, double val, int length) {
int nthreads;
dim3 griddims;
setsizesLeanD(length, &griddims, &nthreads);
__set_val<<<griddims,nthreads>>>(A, val, length);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
int set_ival(double *A, int val, int length) {
  int nthreads;
  dim3 griddims;
  setsizesLeanD(length, &griddims, &nthreads);
  // Widen the 32-bit pattern before reinterpreting: dereferencing (double *)&val
  // (as the float version of this kernel did) reads 4 bytes past the int. Here
  // the double's low word carries val's bits and the high word is zero.
  union { long long l; double d; } bits;
  bits.l = (long long)((unsigned int)val);
  __set_val<<<griddims,nthreads>>>(A, bits.d, length);
  cudaStreamSynchronize(SYNC_STREAM);
  cudaError_t err = cudaGetLastError();
  return err;
}
__global__ void __copyToInds(double *A, double *B, int *I, long long len) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
int step = blockDim.x * gridDim.x * gridDim.y;
long long i;
for (i = tid; i < len; i += step) {
B[I[i]] = A[i];
}
}
int copyToInds(double *A, double *B, int *I, long long len) {
int nthreads;
dim3 griddims;
setsizesLeanD(len, &griddims, &nthreads);
__copyToInds<<<griddims,nthreads>>>(A, B, I, len);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
template<typename T>
__global__ void __copyFromInds(T *A, T *B, int *I, long long len) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
int step = blockDim.x * gridDim.x * gridDim.y;
long long i;
for (i = tid; i < len; i += step) {
B[i] = A[I[i]];
}
}
int copyFromInds(double *A, double *B, int *I, long long len) {
int nthreads;
dim3 griddims;
setsizesLeanD(len, &griddims, &nthreads);
__copyFromInds<<<griddims,nthreads>>>(A, B, I, len);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
// Implement B[I,J] = A
// indexed copy: version with one block per column
#define COPYTOINDS2DA(DFNAME,IEXPR,JEXPR) \
__global__ void __copyToInds2D##DFNAME(double *A, int lda, double *B, int ldb, int *I, int nrows, int *J, int ncols) { \
int iblock = blockIdx.x + blockIdx.y * gridDim.x; \
if (iblock < ncols) { \
int icol = JEXPR; \
for (int i = threadIdx.x; i < nrows; i += blockDim.x) { \
B[IEXPR + icol * ldb] = A[i + iblock * lda]; \
} \
} \
}
COPYTOINDS2DA(nn,I[i],J[iblock])
COPYTOINDS2DA(xn,i,J[iblock])
COPYTOINDS2DA(nx,I[i],iblock)
COPYTOINDS2DA(xx,i,iblock)
// Implement B[I,J] = A
// indexed copy: version with one thread per element
#define COPYTOINDS2DB(DFNAME,IEXPR,JEXPR) \
__global__ void __copyToInds2DB##DFNAME(double *A, int lda, double *B, int ldb, int *I, int nrows, int *J, int ncols) { \
int indx = threadIdx.x + blockDim.x * (blockIdx.x + blockIdx.y * gridDim.x); \
if (indx < nrows * ncols) { \
int irow = indx % nrows; \
int icol = indx / nrows; \
B[IEXPR + JEXPR * ldb] = A[irow + icol * lda]; \
} \
}
COPYTOINDS2DB(nn,I[irow],J[icol])
COPYTOINDS2DB(xn,irow,J[icol])
COPYTOINDS2DB(nx,I[irow],icol)
COPYTOINDS2DB(xx,irow,icol)
// Implement B[I,J] = A
int copyToInds2D(double *A, int lda, double *B, int ldb, int *I, int nrows, int *J, int ncols) {
int len = nrows * ncols;
int nthreads = max(32, min(1024, nrows));
int nblocks = min(ncols, (len-1)/nthreads + 1);
dim3 griddims;
griddims.x = 1;
griddims.y = 1;
griddims.z = 1;
if (nblocks < 65536) {
griddims.x = nblocks;
} else {
int vs = (int)sqrt((double)nblocks);
griddims.x = vs;
griddims.y = (nblocks-1)/vs + 1;
}
if (nblocks == ncols) {
if (I == NULL) {
if (J == NULL) {
__copyToInds2Dxx<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
} else {
__copyToInds2Dxn<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
}
} else {
if (J == NULL) {
__copyToInds2Dnx<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
} else {
__copyToInds2Dnn<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
}
}
} else {
if (I == NULL) {
if (J == NULL) {
__copyToInds2DBxx<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
} else {
__copyToInds2DBxn<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
}
} else {
if (J == NULL) {
__copyToInds2DBnx<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
} else {
__copyToInds2DBnn<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
}
}
}
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
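// Editor's sketch (hypothetical): B[I,J] = A with the NULL convention used
// above -- a NULL index array selects the identity mapping, so this scatters
// row i of A to row I[i] of B while leaving columns in place.
void example_copyToInds2D(double *dA, int lda, double *dB, int ldb, int *dI, int nrows, int ncols) {
  copyToInds2D(dA, lda, dB, ldb, dI, nrows, NULL, ncols);
}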
__global__ void __copyToInds3D(double *A, int lda, int rda, double *B, int ldb, int rdb, int *I, int nrows, int *J, int ncols, int *K, int nk) {
int ii = threadIdx.x + blockDim.x * blockIdx.x;
int jj = threadIdx.y + blockDim.y * blockIdx.y;
int kk = threadIdx.z + blockDim.z * blockIdx.z;
int i, j, k, mapi, mapj, mapk;
for (k = kk; k < nk; k += blockDim.z * gridDim.z) {
mapk = k;
if (K != NULL) mapk = K[k];
for (j = jj; j < ncols; j += blockDim.y * gridDim.y) {
mapj = j;
if (J != NULL) mapj = J[j];
if (I != NULL) {
for (i = ii; i < nrows; i += blockDim.x * gridDim.x) {
mapi = I[i];
B[mapi + ldb * (mapj + rdb * mapk)] = A[i + lda * (j + rda * k)];
}
} else {
for (i = ii; i < nrows; i += blockDim.x * gridDim.x) {
mapi = i;
B[mapi + ldb * (mapj + rdb * mapk)] = A[i + lda * (j + rda * k)];
}
}
}
}
}
int copyToInds3D(double *A, int lda, int rda, double *B, int ldb, int rdb, int *I, int nrows, int *J, int ncols, int *K, int nk) {
int ntx, nty, ntz, nbx, nby, nbz;
ntx = min(nrows, 1024);
nbx = min((nrows - 1) / ntx + 1, 1024);
nty = min(ncols, 1024/ntx);
nby = min((ncols - 1) / nty + 1, 1024);
ntz = min(nk, 1024/ntx/nty);
nbz = min((nk - 1) / ntz + 1, 1024);
dim3 blockdims(ntx, nty, ntz);
dim3 griddims(nbx, nby, nbz);
__copyToInds3D<<<griddims,blockdims>>>(A, lda, rda, B, ldb, rdb, I, nrows, J, ncols, K, nk);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __copyToInds4D(double *A, int lda, int rda, int tda, double *B, int ldb, int rdb, int tdb, int *I, int nrows, int *J, int ncols, int *K, int nk, int *L, int nl, int ntk, int nbk, int ntl, int nbl) {
int ii = threadIdx.x + blockDim.x * blockIdx.x;
int jj = threadIdx.y + blockDim.y * blockIdx.y;
int tk = threadIdx.z / ntl;
int tl = threadIdx.z - tk * ntl;
int bk = blockIdx.z / nbl;
int bl = blockIdx.z - bk * nbl;
int kk = tk + ntk * bk;
int ll = tl + ntl * bl;
int i, j, k, l, mapi, mapj, mapk, mapl;
for (l = ll; l < nl; l += ntl * nbl) {
mapl = l;
if (L != NULL) mapl = L[l];
for (k = kk; k < nk; k += ntk * nbk) {
mapk = k;
if (K != NULL) mapk = K[k];
for (j = jj; j < ncols; j += blockDim.y * gridDim.y) {
mapj = j;
if (J != NULL) mapj = J[j];
if (I != NULL) {
for (i = ii; i < nrows; i += blockDim.x * gridDim.x) {
mapi = I[i];
B[mapi + ldb * (mapj + rdb * (mapk + tdb * mapl))] = A[i + lda * (j + rda * (k + tda * l))];
}
} else {
for (i = ii; i < nrows; i += blockDim.x * gridDim.x) {
B[i + ldb * (mapj + rdb * (mapk + tdb * mapl))] = A[i + lda * (j + rda * (k + tda * l))];
}
}
}
}
}
}
int copyToInds4D(double *A, int lda, int rda, int tda, double *B, int ldb, int rdb, int tdb, int *I, int nrows, int *J, int ncols, int *K, int nk, int *L, int nl) {
int ntx, nty, ntk, ntl, nbx, nby, nbk, nbl;
ntx = min(nrows, 1024);
nbx = min((nrows - 1) / ntx + 1, 1024);
nty = min(ncols, 1024/ntx);
nby = min((ncols - 1) / nty + 1, 1024);
ntk = min(nk, 1024/ntx/nty);
nbk = min((nk - 1) / ntk + 1, 255);
ntl = min(nl, 1024/ntx/nty/ntk);
nbl = min((nl - 1) / ntl + 1, 255);
dim3 blockdims(ntx, nty, ntk * ntl);
dim3 griddims(nbx, nby, nbk * nbl);
__copyToInds4D<<<griddims,blockdims>>>(A, lda, rda, tda, B, ldb, rdb, tdb, I, nrows, J, ncols, K, nk, L, nl, ntk, nbk, ntl, nbl);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __fillToInds(double A, double *B, int *I, long long len) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
int step = blockDim.x * gridDim.x * gridDim.y;
long long i;
for (i = tid; i < len; i += step) {
B[I[i]] = A;
}
}
int fillToInds(double A, double *B, int *I, long long len) {
int nthreads;
dim3 griddims;
setsizesLeanD(len, &griddims, &nthreads);
__fillToInds<<<griddims,nthreads>>>(A, B, I, len);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
// Implement B[I,J] = c
// indexed copy: version with one block per column
#define FILLTOINDS2DA(DFNAME,IEXPR,JEXPR,ETYPE) \
__global__ void __fillToInds2D##DFNAME(ETYPE A, ETYPE *B, int ldb, int *I, int nrows, int *J, int ncols) { \
int iblock = blockIdx.x + blockIdx.y * gridDim.x; \
if (iblock < ncols) { \
int icol = JEXPR; \
for (int i = threadIdx.x; i < nrows; i += blockDim.x) { \
B[IEXPR + icol * ldb] = A; \
} \
} \
}
FILLTOINDS2DA(nn,I[i],J[iblock],double)
FILLTOINDS2DA(xn,i,J[iblock],double)
FILLTOINDS2DA(nx,I[i],iblock,double)
FILLTOINDS2DA(xx,i,iblock,double)
// Implement B[I,J] = A
// indexed copy: version with one thread per element
#define FILLTOINDS2DB(DFNAME,IEXPR,JEXPR,ETYPE) \
__global__ void __fillToInds2DB##DFNAME(ETYPE A, ETYPE *B, int ldb, int *I, int nrows, int *J, int ncols) { \
int indx = threadIdx.x + blockDim.x * (blockIdx.x + blockIdx.y * gridDim.x); \
if (indx < nrows * ncols) { \
int irow = indx % nrows; \
int icol = indx / nrows; \
B[IEXPR + JEXPR * ldb] = A; \
} \
}
FILLTOINDS2DB(nn,I[irow],J[icol],double)
FILLTOINDS2DB(xn,irow,J[icol],double)
FILLTOINDS2DB(nx,I[irow],icol,double)
FILLTOINDS2DB(xx,irow,icol,double)
int fillToInds2D(double A, double *B, int ldb, int *I, int nrows, int *J, int ncols) {
int len = nrows * ncols;
int nthreads = max(32, min(1024, nrows));
int nblocks = min(ncols, (len-1)/nthreads + 1);
dim3 griddims;
griddims.x = 1;
griddims.y = 1;
griddims.z = 1;
if (nblocks < 65536) {
griddims.x = nblocks;
} else {
int vs = (int)sqrt((float)nblocks);
griddims.x = vs;
griddims.y = (nblocks-1)/vs + 1;
}
if (nblocks == ncols) {
if (I == NULL) {
if (J == NULL) {
__fillToInds2Dxx<<<griddims,nthreads>>>(A, B, ldb, I, nrows, J, ncols);
} else {
__fillToInds2Dxn<<<griddims,nthreads>>>(A, B, ldb, I, nrows, J, ncols);
}
} else {
if (J == NULL) {
__fillToInds2Dnx<<<griddims,nthreads>>>(A, B, ldb, I, nrows, J, ncols);
} else {
__fillToInds2Dnn<<<griddims,nthreads>>>(A, B, ldb, I, nrows, J, ncols);
}
}
} else {
if (I == NULL) {
if (J == NULL) {
__fillToInds2DBxx<<<griddims,nthreads>>>(A, B, ldb, I, nrows, J, ncols);
} else {
__fillToInds2DBxn<<<griddims,nthreads>>>(A, B, ldb, I, nrows, J, ncols);
}
} else {
if (J == NULL) {
__fillToInds2DBnx<<<griddims,nthreads>>>(A, B, ldb, I, nrows, J, ncols);
} else {
__fillToInds2DBnn<<<griddims,nthreads>>>(A, B, ldb, I, nrows, J, ncols);
}
}
}
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __fillToInds3D(double A, double *B, int ldb, int rdb, int *I, int nrows, int *J, int ncols, int *K, int nk) {
int ii = threadIdx.x + blockDim.x * blockIdx.x;
int jj = threadIdx.y + blockDim.y * blockIdx.y;
int kk = threadIdx.z + blockDim.z * blockIdx.z;
int i, j, k, mapi, mapj, mapk;
for (k = kk; k < nk; k += blockDim.z * gridDim.z) {
mapk = k;
if (K != NULL) mapk = K[k];
for (j = jj; j < ncols; j += blockDim.y * gridDim.y) {
mapj = j;
if (J != NULL) mapj = J[j];
if (I != NULL) {
for (i = ii; i < nrows; i += blockDim.x * gridDim.x) {
mapi = I[i];
B[mapi + ldb * (mapj + rdb * mapk)] = A;
}
} else {
for (i = ii; i < nrows; i += blockDim.x * gridDim.x) {
mapi = i;
B[mapi + ldb * (mapj + rdb * mapk)] = A;
}
}
}
}
}
int fillToInds3D(double A, double *B, int ldb, int rdb, int *I, int nrows, int *J, int ncols, int *K, int nk) {
int ntx, nty, ntz, nbx, nby, nbz;
ntx = min(nrows, 1024);
nbx = min((nrows - 1) / ntx + 1, 1024);
nty = min(ncols, 1024/ntx);
nby = min((ncols - 1) / nty + 1, 1024);
ntz = min(nk, 1024/ntx/nty);
nbz = min((nk - 1) / ntz + 1, 1024);
dim3 blockdims(ntx, nty, ntz);
dim3 griddims(nbx, nby, nbz);
__fillToInds3D<<<griddims,blockdims>>>(A, B, ldb, rdb, I, nrows, J, ncols, K, nk);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __fillToInds4D(double A, double *B, int ldb, int rdb, int tdb, int *I, int nrows, int *J, int ncols, int *K, int nk, int *L, int nl, int ntk, int nbk, int ntl, int nbl) {
int ii = threadIdx.x + blockDim.x * blockIdx.x;
int jj = threadIdx.y + blockDim.y * blockIdx.y;
int tk = threadIdx.z / ntl;
int tl = threadIdx.z - tk * ntl;
int bk = blockIdx.z / nbl;
int bl = blockIdx.z - bk * nbl;
int kk = tk + ntk * bk;
int ll = tl + ntl * bl;
int i, j, k, l, mapi, mapj, mapk, mapl;
for (l = ll; l < nl; l += ntl * nbl) {
mapl = l;
if (L != NULL) mapl = L[l];
for (k = kk; k < nk; k += ntk * nbk) {
mapk = k;
if (K != NULL) mapk = K[k];
for (j = jj; j < ncols; j += blockDim.y * gridDim.y) {
mapj = j;
if (J != NULL) mapj = J[j];
if (I != NULL) {
for (i = ii; i < nrows; i += blockDim.x * gridDim.x) {
mapi = I[i];
B[mapi + ldb * (mapj + rdb * (mapk + tdb * mapl))] = A;
}
} else {
for (i = ii; i < nrows; i += blockDim.x * gridDim.x) {
B[i + ldb * (mapj + rdb * (mapk + tdb * mapl))] = A;
}
}
}
}
}
}
int fillToInds4D(double A, double *B, int ldb, int rdb, int tdb, int *I, int nrows, int *J, int ncols, int *K, int nk, int *L, int nl) {
int ntx, nty, ntk, ntl, nbx, nby, nbk, nbl;
ntx = min(nrows, 1024);
nbx = min((nrows - 1) / ntx + 1, 1024);
nty = min(ncols, 1024/ntx);
nby = min((ncols - 1) / nty + 1, 1024);
ntk = min(nk, 1024/ntx/nty);
nbk = min((nk - 1) / ntk + 1, 255);
ntl = min(nl, 1024/ntx/nty/ntk);
nbl = min((nl - 1) / ntl + 1, 255);
dim3 blockdims(ntx, nty, ntk * ntl);
dim3 griddims(nbx, nby, nbk * nbl);
__fillToInds4D<<<griddims,blockdims>>>(A, B, ldb, rdb, tdb, I, nrows, J, ncols, K, nk, L, nl, ntk, nbk, ntl, nbl);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
// Implement B = A[I,J]
// indexed copy: version with one block per column
#define COPYFROMINDS2DA(FNAME,IEXPR,JEXPR) \
__global__ void __copyFromInds2D##FNAME(double *A, int lda, double *B, int ldb, int *I, int nrows, int *J, int ncols) { \
int iblock = blockIdx.x + blockIdx.y * gridDim.x; \
if (iblock < ncols) { \
int icol = JEXPR; \
for (int i = threadIdx.x; i < nrows; i += blockDim.x) { \
B[i + iblock * ldb] = A[IEXPR + icol * lda]; \
} \
} \
}
COPYFROMINDS2DA(nn,I[i],J[iblock])
COPYFROMINDS2DA(xn,i,J[iblock])
COPYFROMINDS2DA(nx,I[i],iblock)
COPYFROMINDS2DA(xx,i,iblock)
// Implement B = A[I,J]
// indexed copy: version with one thread per element
#define COPYFROMINDS2DB(FNAME,IEXPR,JEXPR) \
__global__ void __copyFromInds2DB##FNAME(double *A, int lda, double *B, int ldb, int *I, int nrows, int *J, int ncols) { \
int indx = threadIdx.x + blockDim.x * (blockIdx.x + blockIdx.y * gridDim.x); \
if (indx < nrows * ncols) { \
int irow = indx % nrows; \
int icol = indx / nrows; \
B[irow + icol * ldb] = A[IEXPR + JEXPR * lda]; \
} \
}
COPYFROMINDS2DB(nn,I[irow],J[icol])
COPYFROMINDS2DB(xn,irow,J[icol])
COPYFROMINDS2DB(nx,I[irow],icol)
COPYFROMINDS2DB(xx,irow,icol)
// Implement B = A[I,J]
int copyFromInds2D(double *A, int lda, double *B, int ldb, int *I, int nrows, int *J, int ncols) {
int len = nrows * ncols;
int nthreads = max(32, min(1024, nrows));
int nblocks = min(ncols, (len-1)/nthreads + 1);
dim3 griddims;
griddims.x = 1;
griddims.y = 1;
griddims.z = 1;
if (nblocks < 65536) {
griddims.x = nblocks;
} else {
int vs = (int)sqrt((float)nblocks);
griddims.x = vs;
griddims.y = (nblocks-1)/vs + 1;
}
if (nblocks == ncols) {
if (I == NULL) {
if (J == NULL) {
__copyFromInds2Dxx<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
} else {
__copyFromInds2Dxn<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
}
} else {
if (J == NULL) {
__copyFromInds2Dnx<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
} else {
__copyFromInds2Dnn<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
}
}
} else {
if (I == NULL) {
if (J == NULL) {
__copyFromInds2DBxx<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
} else {
__copyFromInds2DBxn<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
}
} else {
if (J == NULL) {
__copyFromInds2DBnx<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
} else {
__copyFromInds2DBnn<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
}
}
}
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
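// Editor's sketch (hypothetical): the gather counterpart, B = A[I,J]; here
// J == NULL reads the first ncols columns unpermuted.
void example_copyFromInds2D(double *dA, int lda, double *dB, int ldb, int *dI, int nrows, int ncols) {
  copyFromInds2D(dA, lda, dB, ldb, dI, nrows, NULL, ncols);
}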
__global__ void __copyFromInds3D(double *A, int lda, int rda, double *B, int ldb, int rdb, int *I, int nrows, int *J, int ncols, int *K, int nk) {
int ii = threadIdx.x + blockDim.x * blockIdx.x;
int jj = threadIdx.y + blockDim.y * blockIdx.y;
int kk = threadIdx.z + blockDim.z * blockIdx.z;
int i, j, k, mapi, mapj, mapk;
for (k = kk; k < nk; k += blockDim.z * gridDim.z) {
mapk = k;
if (K != NULL) mapk = K[k];
for (j = jj; j < ncols; j += blockDim.y * gridDim.y) {
mapj = j;
if (J != NULL) mapj = J[j];
if (I != NULL) {
for (i = ii; i < nrows; i += blockDim.x * gridDim.x) {
mapi = I[i];
B[i + ldb * (j + rdb * k)] = A[mapi + lda * (mapj + rda * mapk)];
}
} else {
for (i = ii; i < nrows; i += blockDim.x * gridDim.x) {
mapi = i;
B[i + ldb * (j + rdb * k)] = A[mapi + lda * (mapj + rda * mapk)];
}
}
}
}
}
int copyFromInds3D(double *A, int lda, int rda, double *B, int ldb, int rdb, int *I, int nrows, int *J, int ncols, int *K, int nk) {
int ntx, nty, ntz, nbx, nby, nbz;
ntx = min(nrows, 1024);
nbx = (nrows - 1) / ntx + 1;
nty = min(ncols, 1024/ntx);
nby = (ncols - 1) / nty + 1;
ntz = min(nk, 1024/(ntx*nty));
nbz = (nk - 1) / ntz + 1;
dim3 blockdims(ntx, nty, ntz);
dim3 griddims(nbx, nby, nbz);
__copyFromInds3D<<<griddims,blockdims>>>(A, lda, rda, B, ldb, rdb, I, nrows, J, ncols, K, nk);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __copyFromInds4D(double *A, int lda, int rda, int tda, double *B, int ldb, int rdb, int tdb, int *I, int nrows, int *J, int ncols, int *K, int nk, int *L, int nl, int ntk, int nbk, int ntl, int nbl) {
int ii = threadIdx.x + blockDim.x * blockIdx.x;
int jj = threadIdx.y + blockDim.y * blockIdx.y;
int tk = threadIdx.z / ntl;
int tl = threadIdx.z - tk * ntl;
int bk = blockIdx.z / nbl;
int bl = blockIdx.z - bk * nbl;
int kk = tk + ntk * bk;
int ll = tl + ntl * bl;
int i, j, k, l, mapi, mapj, mapk, mapl;
for (l = ll; l < nl; l += ntl * nbl) {
mapl = l;
if (L != NULL) mapl = L[l];
for (k = kk; k < nk; k += ntk * nbk) {
mapk = k;
if (K != NULL) mapk = K[k];
for (j = jj; j < ncols; j += blockDim.y * gridDim.y) {
mapj = j;
if (J != NULL) mapj = J[j];
if (I != NULL) {
for (i = ii; i < nrows; i += blockDim.x * gridDim.x) {
mapi = I[i];
B[i + ldb * (j + rdb * (k + tdb * l))] = A[mapi + lda * (mapj + rda * (mapk + tda * mapl))];
}
} else {
for (i = ii; i < nrows; i += blockDim.x * gridDim.x) {
B[i + ldb * (j + rdb * (k + tdb * l))] = A[i + lda * (mapj + rda * (mapk + tda * mapl))];
}
}
}
}
}
}
int copyFromInds4D(double *A, int lda, int rda, int tda, double *B, int ldb, int rdb, int tdb, int *I, int nrows, int *J, int ncols, int *K, int nk, int *L, int nl) {
int ntx, nty, ntk, ntl, nbx, nby, nbk, nbl;
ntx = min(nrows, 1024);
nbx = min((nrows - 1) / ntx + 1, 1024);
nty = min(ncols, 1024/ntx);
nby = min((ncols - 1) / nty + 1, 1024);
ntk = min(nk, 1024/ntx/nty);
nbk = min((nk - 1) / ntk + 1, 255);
ntl = min(nl, 1024/ntx/nty/ntk);
nbl = min((nl - 1) / ntl + 1, 255);
dim3 blockdims(ntx, nty, ntk * ntl);
dim3 griddims(nbx, nby, nbk * nbl);
__copyFromInds4D<<<griddims,blockdims>>>(A, lda, rda, tda, B, ldb, rdb, tdb, I, nrows, J, ncols, K, nk, L, nl, ntk, nbk, ntl, nbl);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __dsmult(int nrows, int nnz, double *A, double *Bdata, int *Bir, int *Bic, double *C) {
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
for (int i = threadIdx.x; i < nrows; i += blockDim.x) {
double sum = 0;
for (int j = jstart; j < jend ; j++) {
sum += A[i + nrows * Bir[j]] * Bdata[j];
if (j == jend-1 || Bic[j] != Bic[j+1]) {
atomicAdd(&C[i + nrows * Bic[j]], sum);
sum = 0;
}
}
}
}
__global__ void __dsmultx(int nrows, int nnz, double *A, double *Bdata, int *Bir, int *Bic, double *C) {
int bid = threadIdx.y + blockDim.y * blockIdx.x;
int nb = blockDim.y * gridDim.x;
int jstart = ((long long)bid) * nnz / nb;
int jend = ((long long)(bid + 1)) * nnz / nb;
double sum = 0;
for (int j = jstart; j < jend ; j++) {
sum += A[threadIdx.x + nrows * Bir[j]] * Bdata[j];
if (j == jend-1 || Bic[j] != Bic[j+1]) {
atomicAdd(&C[threadIdx.x + nrows * Bic[j]], sum);
sum = 0;
}
}
}
int dsmult(int nrows, int ncols, int nnz, double *A, double *Bdata, int *Bir, int *Bic, double *C) {
if (nrows < 128) {
int nt = max(1, min(ncols/2, 256/nrows));
dim3 threadDim(nrows, nt, 1);
int nblocks = min(MAXXGRID, max(1, ncols/nt));
__dsmultx<<<nblocks,threadDim>>>(nrows, nnz, A, Bdata, Bir, Bic, C);
} else {
int nthreads = min(1024, nrows);
int nblocks = min(MAXXGRID, ncols);
__dsmult<<<nblocks,nthreads>>>(nrows, nnz, A, Bdata, Bir, Bic, C);
}
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
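// Editor's sketch (hypothetical): dense x sparse product C += A*B. B is in
// coordinate form -- Bir[j]/Bic[j] give the row/column of nonzero Bdata[j] --
// with nonzeros ordered by column, since the kernels above flush their running
// sums when Bic changes. A is nrows x k, C is nrows x ncols, both column-major
// and C pre-zeroed (results are accumulated atomically).
void example_dsmult(double *dA, int nrows, int ncols, int nnz,
                    double *dBdata, int *dBir, int *dBic, double *dC) {
  dsmult(nrows, ncols, nnz, dA, dBdata, dBir, dBic, dC);
}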
int dsmult_tune(int nrows, int ncols, int nnz, double *A, double *Bdata, int *Bir, int *Bic, double *C, int nblocks, int nthreads) {
__dsmult<<<nblocks,nthreads>>>(nrows, nnz, A, Bdata, Bir, Bic, C);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
int dsmultx_tune(int nrows, int ncols, int nnz, double *A, double *Bdata, int *Bir, int *Bic, double *C, int nblocks, int nthreadsx, int nthreadsy) {
dim3 threadDim(nthreadsx, nthreadsy, 1);
__dsmultx<<<nblocks,threadDim>>>(nrows, nnz, A, Bdata, Bir, Bic, C);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __dsmultT(int nrows, int nnz, double *A, double *Bdata, int *Bir, int *Bic, double *C) {
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
for (int i = threadIdx.x; i < nrows; i += blockDim.x) {
double aval = 0;
for (int j = jstart; j < jend ; j++) {
if (j == jstart || Bic[j-1] != Bic[j]) {
aval = A[i + nrows * Bic[j]];
}
atomicAdd(&C[i + nrows * Bir[j]], aval * Bdata[j]);
}
}
}
__global__ void __dsmultTx(int nrows, int nnz, double *A, double *Bdata, int *Bir, int *Bic, double *C) {
int bid = threadIdx.y + blockDim.y * blockIdx.x;
int nb = blockDim.y * gridDim.x;
int jstart = ((long long)bid) * nnz / nb;
int jend = ((long long)(bid + 1)) * nnz / nb;
double aval = 0;
for (int j = jstart; j < jend ; j++) {
if (j == jstart || Bic[j-1] != Bic[j]) {
aval = A[threadIdx.x + nrows * Bic[j]];
}
atomicAdd(&C[threadIdx.x + nrows * Bir[j]], aval * Bdata[j]);
}
}
int dsmultT(int nrows, int ncols, int nnz, double *A, double *Bdata, int *Bir, int *Bic, double *C) {
if (nrows < 128) {
int nt = max(1, min(ncols/2, 256/nrows));
dim3 threadDim(nrows, nt, 1);
int nblocks = min(MAXXGRID, max(1, ncols/nt));
__dsmultTx<<<nblocks,threadDim>>>(nrows, nnz, A, Bdata, Bir, Bic, C);
} else {
int nthreads = min(1024, nrows);
int nblocks = min(MAXXGRID, ncols);
__dsmultT<<<nblocks,nthreads>>>(nrows, nnz, A, Bdata, Bir, Bic, C);
}
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __spsum1(int nrows, int ncols, int nnz, int *Air, int *Aic, double *P, double *B) {
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
for (int i = jstart + threadIdx.x; i < jend; i += blockDim.x) {
atomicAdd(&B[Aic[i]], P[i]);
}
}
__global__ void __spsum2(int nrows, int ncols, int nnz, int *Air, int *Aic, double *P, double *B) {
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
for (int i = jstart + threadIdx.x; i < jend; i += blockDim.x) {
atomicAdd(&B[Air[i]], P[i]);
}
}
int spsum(int nrows, int ncols, int nnz, int *Air, int *Aic, double *P, double *B, int n) {
int nthreads = max(32, min(128, nnz));
int nblks = min(65536, max(1, (nnz-1) / 128));
if (n == 1) {
__spsum1<<<nblks,nthreads>>>(nrows, ncols, nnz, Air, Aic, P, B);
} else {
__spsum2<<<nblks,nthreads>>>(nrows, ncols, nnz, Air, Aic, P, B);
}
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __dds(int nrows, int nnz, double *A, double *B, int *Cir, int *Cic, double *P);
__global__ void __dds0(int nrows, int ncols, double *A, double *B, int *Cir, int *Cic, double *P);
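// Editor's note: __dds appears to compute one dot product per listed position,
// P[j] = dot(A[:, Cir[j]], B[:, Cic[j]]) -- entry (Cir[j], Cic[j]) of A^T B.
// The newer-architecture path accumulates into P with atomicAdd while the
// fallback overwrites it; __dds0 takes the positions in CSC form (Cir, Cjc)
// and appears to assume nrows matches its 32x32 thread block.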
#define DDS_BLKY 32
#if __CUDA_ARCH__ > 200
__global__ void __dds(int nrows, int nnz, double *A, double *B, int *Cir, int *Cic, double *P) {
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
int tid = threadIdx.x + blockDim.x * threadIdx.y;
for (int j = jstart; j < jend ; j++) {
double sum = 0;
int aoff = nrows * Cir[j];
int boff = nrows * Cic[j];
for (int i = tid; i < nrows; i += blockDim.x * blockDim.y) {
sum += A[i + aoff] * B[i + boff];
}
for (int i = 1; i < blockDim.x; i *= 2) {
double tmp = __shfl_down(sum, i);
if (threadIdx.x + i < blockDim.x) sum = sum + tmp;
}
if (threadIdx.x == 0) {
atomicAdd(&P[j], sum);
}
}
}
__global__ void __dds0(int nrows, int ncols, double *A, double *B, int *Cir, int *Cjc, double *P) {
__shared__ double merge[32];
int jstart = ((long long)blockIdx.x) * ncols / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * ncols / gridDim.x;
int tid = threadIdx.x + blockDim.x * threadIdx.y;
int aoff, boff;
double user, prod, sum, bsum;
for (int j0 = jstart; j0 < jend ; j0++) {
boff = nrows * j0;
user = B[tid + boff];
for (int j = Cjc[j0]; j < Cjc[j0+1]; j++) {
aoff = nrows * Cir[j];
prod = A[tid + aoff] * user;
sum = prod + __shfl_down(prod, 1);
sum = sum + __shfl_down(sum, 2);
sum = sum + __shfl_down(sum, 4);
sum = sum + __shfl_down(sum, 8);
sum = sum + __shfl_down(sum, 16);
bsum = __shfl(sum, 0);
__syncthreads();
if (threadIdx.x == threadIdx.y) {
merge[threadIdx.x] = bsum;
}
__syncthreads();
if (threadIdx.y == 0) {
sum = merge[threadIdx.x];
sum = sum + __shfl_down(sum, 1);
sum = sum + __shfl_down(sum, 2);
sum = sum + __shfl_down(sum, 4);
sum = sum + __shfl_down(sum, 8);
sum = sum + __shfl_down(sum, 16);
if (threadIdx.x == 0) {
P[j] = sum;
}
}
}
}
}
#else
__global__ void __dds(int nrows, int nnz, double *A, double *B, int *Cir, int *Cic, double *P) {
__shared__ double parts[32*DDS_BLKY];
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
int tid = threadIdx.x + blockDim.x * threadIdx.y;
for (int j = jstart; j < jend ; j++) {
double sum = 0;
int aoff = nrows * Cir[j];
int boff = nrows * Cic[j];
for (int i = tid; i < nrows; i += blockDim.x * blockDim.y) {
sum += A[i + aoff] * B[i + boff];
}
parts[tid] = sum;
for (int i = 1; i < blockDim.x * blockDim.y; i *= 2) {
__syncthreads();
if (i + tid < blockDim.x * blockDim.y) {
parts[tid] = parts[tid] + parts[i + tid];
}
}
__syncthreads();
if (tid == 0) {
P[j] = parts[0];
}
__syncthreads();
}
}
__global__ void __dds0(int nrows, int ncols, double *A, double *B, int *Cir, int *Cjc, double *P) {}
#endif
int dds(int nrows, int nnz, double *A, double *B, int *Cir, int *Cic, double *P) {
dim3 blockDims(min(32,nrows), min(DDS_BLKY, 1+(nrows-1)/64), 1);
// int nblocks = min(65536, max(1,nnz/8));
int nblocks = min(16384, max(1,nnz/128));
__dds<<<nblocks,blockDims>>>(nrows, nnz, A, B, Cir, Cic, P);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
int dds0(int nrows, int ncols, double *A, double *B, int *Cir, int *Cic, double *P) {
dim3 blockDims(32, 32, 1);
// int nblocks = min(65536, max(1,nnz/8));
int nblocks = min(16384, max(1,ncols/64));
__dds0<<<nblocks,blockDims>>>(nrows, ncols, A, B, Cir, Cic, P);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
#define BLOCKDIM 32
__global__ void __transpose(double *in, int instride, double *out, int outstride, int nrows, int ncols) {
int nx = BLOCKDIM * gridDim.x;
int ny = BLOCKDIM * gridDim.y;
int ix = BLOCKDIM * blockIdx.x;
int iy = BLOCKDIM * blockIdx.y;
__shared__ double tile[BLOCKDIM][BLOCKDIM+1];
for (int yb = iy; yb < ncols; yb += ny) {
for (int xb = ix; xb < nrows; xb += nx) {
if (xb + threadIdx.x < nrows) {
int ylim = min(ncols, yb + BLOCKDIM);
for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) {
tile[threadIdx.x][y-yb] = in[threadIdx.x+xb + y*instride];
}
}
__syncthreads();
if (yb + threadIdx.x < ncols) {
int xlim = min(nrows, xb + BLOCKDIM);
for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) {
out[threadIdx.x + yb + x*outstride] = tile[x-xb][threadIdx.x];
}
}
__syncthreads();
}
}
}
int transpose(double *in, int instride, double *out, int outstride, int nrows, int ncols) {
int gridx = min(32, 1+(nrows-1)/256);
int gridy = min(32, 1+(ncols-1)/256);
const dim3 griddims(gridx, gridy, 1);
const dim3 blockdims(BLOCKDIM,16,1);
cudaError_t err;
int dev = -1;
cudaGetDevice(&dev);
__transpose<<<griddims,blockdims>>>(in, instride, out, outstride, nrows, ncols);
cudaStreamSynchronize(SYNC_STREAM);
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "cuda error device %d in transpose of %dx%d matrix", dev, nrows, ncols);
return err;
}
return 0;
}
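// Editor's sketch (hypothetical): out-of-place transpose of a packed
// column-major matrix; the strides are the leading dimensions, nrows for the
// input and ncols for the output.
void example_transpose(double *din, double *dout, int nrows, int ncols) {
  transpose(din, nrows, dout, ncols, nrows, ncols);
}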
__global__ void __embedmat2d(double *a, long long *b, int nrows, int ncols, int sortdown) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
const int signbit = 0x80000000;
const int mag = 0x7fffffff;
int icol;
for (int i = tid; i < nrows*ncols; i += blockDim.x*gridDim.x*gridDim.y) {
double v = a[i];
int vi = *((int *)&v);
if (vi & signbit) {
vi = -(vi & mag);
}
icol = (i/nrows+1);
if (sortdown) icol = ncols - icol + 1;
b[i] = (long long)vi + (((long long)icol)<<32);
}
}
__global__ void __embedmat(double *a, int *b, long long *c, int n) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
const int signbit = 0x80000000;
const int mag = 0x7fffffff;
for (int i = tid; i < n; i += blockDim.x*gridDim.x*gridDim.y) {
double v = a[i];
int vi = *((int *)&v);
if (vi & signbit) {
vi = -(vi & mag);
}
c[i] = (long long)vi + (((long long)b[i])<<32);
}
}
int embedmat2d(double *a, long long *b, int nrows, int ncols, int sortdown) {
int nthreads;
dim3 griddims;
setsizesLeanD(nrows*ncols, &griddims, &nthreads);
__embedmat2d<<<griddims,nthreads>>>(a, b, nrows, ncols, sortdown);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
int embedmat(double *a, int *b, long long *c, int n) {
int nthreads;
dim3 griddims;
setsizesLeanD(n, &griddims, &nthreads);
__embedmat<<<griddims,nthreads>>>(a, b, c, n);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __extractmat2d(double *a, long long *b, int nrows, int ncols) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
const int signbit = 0x80000000;
const int mag = 0x7fffffff;
for (int i = tid; i < nrows*ncols; i += blockDim.x*gridDim.x*gridDim.y) {
int vi = *((int *)&b[i]);
if (vi & signbit) {
vi = -(vi & mag);
}
a[i] = *((double *)&vi);
}
}
__global__ void __extractmat(double *a, int *b, long long *c, int n) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
const int signbit = 0x80000000;
const int mag = 0x7fffffff;
for (int i = tid; i < n; i += blockDim.x*gridDim.x*gridDim.y) {
int vi = *((int *)&c[i]);
if (vi & signbit) {
vi = -(vi & mag);
}
a[i] = *((double *)&vi);
b[i] = *(((int *)&c[i])+1);
}
}
int extractmat2d(double *a, long long *b, int nrows, int ncols) {
int nthreads;
dim3 griddims;
setsizesLeanD(nrows*ncols, &griddims, &nthreads);
__extractmat2d<<<griddims,nthreads>>>(a, b, nrows, ncols);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
int extractmat(double *a, int *b, long long *c, int n) {
int nthreads;
dim3 griddims;
setsizesLeanD(n, &griddims, &nthreads);
__extractmat<<<griddims,nthreads>>>(a, b, c, n);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
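// Editor's note: the embed/extract pair above keeps the float-era bit trick.
// *((int *)&v) reads one 32-bit word of each 64-bit double (the low mantissa
// word on little-endian targets), so the packed sort keys only approximate
// double ordering; and the extract kernels rebuild a double from a lone int
// (*((double *)&vi)), which reads past the variable. Treat these as inherited
// from the float version rather than as full double-precision sort keys.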
#include <thrust/sort.h>
#include <thrust/device_ptr.h>
#include <thrust/reverse.h>
int fsort2d(double *pkeys, unsigned int *pvals, int nrows, int ncols, int asc) {
for (int i = 0; i < ncols; i++) {
thrust::device_ptr<double> keys(pkeys+i*nrows);
thrust::device_ptr<unsigned int> vals(pvals+i*nrows);
if (asc > 0) {
thrust::sort_by_key(keys, keys + nrows, vals);
} else {
thrust::sort_by_key(keys, keys + nrows, vals, thrust::greater<double>());
}
}
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
int fsort(double *pkeys, int N, int asc) {
thrust::device_ptr<double> keys(pkeys);
if (asc > 0) {
thrust::sort(keys, keys + N);
} else {
    thrust::sort(keys, keys + N, thrust::greater<double>());
}
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
int fsorts(double *pkeys, unsigned int *pvals, int *jc, int m, int asc) {
for (int i = 0; i < m; i++) {
thrust::device_ptr<double> keys(pkeys + jc[i]);
thrust::device_ptr<unsigned int> vals(pvals + jc[i]);
int b = jc[i+1] - jc[i];
if (asc > 0) {
thrust::sort_by_key(keys, keys + b, vals);
} else {
thrust::sort_by_key(keys, keys + b, vals, thrust::greater<double>());
}
}
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
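// Editor's sketch (hypothetical): sort every column of a key matrix in
// descending order, carrying a parallel value column along; vals initialized
// to 0..nrows-1 yields each column's sort permutation.
void example_fsort2d(double *dkeys, unsigned int *dvals, int nrows, int ncols) {
  fsort2d(dkeys, dvals, nrows, ncols, 0);   // asc <= 0: descending
}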
#if CUDA_VERSION >= 7000
#if CUDA_VERSION >= 9000
#include <thrust/system/cuda/detail/cub/cub.cuh>
long long disortcubsize(double *inkeys, double *outkeys, unsigned int *invals, unsigned int *outvals, int nelems, int asc) {
size_t size = 0;
void *temp = NULL;
thrust::cuda_cub::cub::DoubleBuffer<double> d_keys(inkeys, outkeys);
thrust::cuda_cub::cub::DoubleBuffer<unsigned int> d_vals(invals, outvals);
if (asc > 0) {
thrust::cuda_cub::cub::DeviceRadixSort::SortPairs(temp, size, d_keys, d_vals, nelems);
} else {
thrust::cuda_cub::cub::DeviceRadixSort::SortPairsDescending(temp, size, d_keys, d_vals, nelems);
}
cudaStreamSynchronize(SYNC_STREAM);
return size;
}
int disortcub(double *inkeys, double *outkeys, unsigned int *invals, unsigned int *outvals, int *temp, long long size, int nelems, int asc) {
thrust::cuda_cub::cub::DoubleBuffer<double> d_keys(inkeys, outkeys);
thrust::cuda_cub::cub::DoubleBuffer<unsigned int> d_vals(invals, outvals);
if (asc > 0) {
thrust::cuda_cub::cub::DeviceRadixSort::SortPairs((void *)temp, (size_t &)size, d_keys, d_vals, nelems);
} else {
thrust::cuda_cub::cub::DeviceRadixSort::SortPairsDescending((void *)temp, (size_t &)size, d_keys, d_vals, nelems);
}
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
int fsort2dx(double *pkeys, unsigned int *pvals, double *tkeys, unsigned int *tvals,
int nrows, int ncols, int asc) {
int i;
cudaError_t err;
long long ntemp;
int * temp;
ntemp = disortcubsize(pkeys, tkeys, pvals, tvals, nrows, asc);
cudaMalloc(&temp, ntemp * sizeof(int));
cudaStreamSynchronize(SYNC_STREAM);
for (i = 0; i < ncols; i++) {
thrust::cuda_cub::cub::DoubleBuffer<double> d_keys(pkeys + (nrows * i), tkeys + (nrows * i));
thrust::cuda_cub::cub::DoubleBuffer<unsigned int> d_vals(pvals + (nrows * i), tvals + (nrows * i));
if (asc > 0) {
thrust::cuda_cub::cub::DeviceRadixSort::SortPairs((void *)temp, (size_t &)ntemp, d_keys, d_vals, nrows);
} else {
thrust::cuda_cub::cub::DeviceRadixSort::SortPairsDescending((void *)temp, (size_t &)ntemp, d_keys, d_vals, nrows);
}
}
cudaStreamSynchronize(SYNC_STREAM);
cudaFree(temp);
err = cudaGetLastError();
return err;
}
#else
long long disortcubsize(double *inkeys, double *outkeys, unsigned int *invals, unsigned int *outvals, int nelems, int asc) {
size_t size = 0;
void *temp = NULL;
thrust::system::cuda::detail::cub_::DoubleBuffer<double> d_keys(inkeys, outkeys);
thrust::system::cuda::detail::cub_::DoubleBuffer<unsigned int> d_vals(invals, outvals);
if (asc > 0) {
thrust::system::cuda::detail::cub_::DeviceRadixSort::SortPairs(temp, size, d_keys, d_vals, nelems);
} else {
thrust::system::cuda::detail::cub_::DeviceRadixSort::SortPairsDescending(temp, size, d_keys, d_vals, nelems);
}
cudaStreamSynchronize(SYNC_STREAM);
return size;
}
int disortcub(double *inkeys, double *outkeys, unsigned int *invals, unsigned int *outvals, int *temp, long long size, int nelems, int asc) {
thrust::system::cuda::detail::cub_::DoubleBuffer<double> d_keys(inkeys, outkeys);
thrust::system::cuda::detail::cub_::DoubleBuffer<unsigned int> d_vals(invals, outvals);
if (asc > 0) {
thrust::system::cuda::detail::cub_::DeviceRadixSort::SortPairs((void *)temp, (size_t &)size, d_keys, d_vals, nelems);
} else {
thrust::system::cuda::detail::cub_::DeviceRadixSort::SortPairsDescending((void *)temp, (size_t &)size, d_keys, d_vals, nelems);
}
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
int fsort2dx(double *pkeys, unsigned int *pvals, double *tkeys, unsigned int *tvals,
int nrows, int ncols, int asc) {
int i;
cudaError_t err;
long long ntemp;
int * temp;
ntemp = disortcubsize(pkeys, tkeys, pvals, tvals, nrows, asc);
cudaMalloc(&temp, ntemp * sizeof(int));
cudaStreamSynchronize(SYNC_STREAM);
for (i = 0; i < ncols; i++) {
thrust::system::cuda::detail::cub_::DoubleBuffer<double> d_keys(pkeys + (nrows * i), tkeys + (nrows * i));
thrust::system::cuda::detail::cub_::DoubleBuffer<unsigned int> d_vals(pvals + (nrows * i), tvals + (nrows * i));
if (asc > 0) {
thrust::system::cuda::detail::cub_::DeviceRadixSort::SortPairs((void *)temp, (size_t &)ntemp, d_keys, d_vals, nrows);
} else {
thrust::system::cuda::detail::cub_::DeviceRadixSort::SortPairsDescending((void *)temp, (size_t &)ntemp, d_keys, d_vals, nrows);
}
}
cudaStreamSynchronize(SYNC_STREAM);
cudaFree(temp);
err = cudaGetLastError();
return err;
}
#endif
#endif
__global__ void __stratify(double *strata, int n, double *a, double *b, unsigned int *bi, int stride) {
__shared__ double ss[32];
__shared__ unsigned int ibin[32];
__shared__ unsigned int ebin[32];
__shared__ unsigned int todo[32];
__shared__ double bins[64][33];
__shared__ unsigned int topush;
int tid = threadIdx.x;
ss[tid] = strata[tid];
ibin[tid] = 0;
for (int i = 0; i < n; i += blockDim.x * gridDim.x) {
int ii = i + tid + blockDim.x * blockIdx.x;
if (tid == 0) topush = 0;
if (ii < n) {
double v = a[ii];
int j = 1;
j = (v > ss[j-1]) ? 2*j+1 : 2*j;
j = (v > ss[j-1]) ? 2*j+1 : 2*j;
j = (v > ss[j-1]) ? 2*j+1 : 2*j;
j = (v > ss[j-1]) ? 2*j+1 : 2*j;
j = (v > ss[j-1]) ? 2*j+1 : 2*j;
j = j - 32;
int k = atomicInc(&ibin[j], 256);
bins[k][j] = v;
if (k == 31) {
k = atomicInc(&topush, 1024);
todo[k] = j;
}
}
if (ibin[tid] >= 32) {
ebin[tid] = atomicAdd(&bi[tid], 32);
ibin[tid] = ibin[tid] - 32;
}
for (int k = 0; k < topush; k++) {
int j = todo[k];
b[j*stride + ebin[j] + tid] = bins[ibin[j] + tid][j];
}
}
ebin[tid] = atomicAdd(&bi[tid], ibin[tid]);
for (int j = 0; j < 32; j++) {
if (tid < ibin[j]) {
b[j*stride + ebin[j] + tid] = bins[tid][j];
}
}
}
int stratify(double *strata, int n, double *a, double *b, unsigned int *bi, int stride) {
__stratify<<<40,32>>>(strata, n, a, b, bi, stride);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
#define SNDVALS 256
#define SNDGRPS 4
#define SNTHREADS 1024
#define SBIGBLK (4*1024)
__global__ void __stratifycounts(double *strata, int n, double *a, unsigned int *bi) {
__shared__ unsigned int ic[SNDVALS][SNDGRPS];
__shared__ double ss[SNDVALS];
int istart = (int)(((long long)blockIdx.x) * n / gridDim.x);
int iend = (int)(((long long)(blockIdx.x+1)) * n / gridDim.x);
int bibase = SNDVALS * (blockIdx.x + istart / SBIGBLK);
int tid = threadIdx.x + threadIdx.y * blockDim.x;
if (threadIdx.y == 0) {
ss[threadIdx.x] = strata[threadIdx.x];
}
for (int i = istart; i < iend; i += SBIGBLK) {
__syncthreads();
if (threadIdx.y < SNDGRPS) {
ic[threadIdx.x][threadIdx.y] = 0;
}
__syncthreads();
for (int k = i + tid; k < min(iend, i + tid + SBIGBLK); k += SNTHREADS) {
double v = a[k];
int j = 0;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = j - SNDVALS + 1;
atomicInc(&ic[j][threadIdx.y], 65536*32767);
}
__syncthreads();
if (threadIdx.y == 0) {
bi[bibase + threadIdx.x] = ic[threadIdx.x][0] + ic[threadIdx.x][1] + ic[threadIdx.x][2] + ic[threadIdx.x][3];
}
bibase += SNDVALS;
}
}
int stratifycounts(double *strata, int n, double *a, unsigned int *bi) {
const dim3 blockdims(SNDVALS, SNTHREADS/SNDVALS, 1);
const dim3 griddims(8,1,1);
__stratifycounts<<<griddims,blockdims>>>(strata, n, a, bi);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
#define RNDVALS 256
#define RNTHREADS 256
#define RNDBITS 8
#define RBIGBLK (4*1024)
__global__ void __radixcounts(double *a, int n, int digit, unsigned int *bi) {
__shared__ unsigned int ic[RNDVALS];
int istart = (int)(((long long)blockIdx.x) * n / gridDim.x);
int iend = (int)(((long long)(blockIdx.x+1)) * n / gridDim.x);
int tid = threadIdx.x;
int bibase = RNDVALS * (blockIdx.x + istart / RBIGBLK);
for (int i = istart; i < iend; i += RBIGBLK) {
__syncthreads();
ic[threadIdx.x] = 0;
__syncthreads();
for (int j = i + tid; j < min(iend, i+tid+RBIGBLK); j += RNTHREADS) {
double v = a[j];
unsigned char *cv = (unsigned char *)&v;
atomicInc(&ic[cv[digit]], 65536*32767);
}
__syncthreads();
bi[bibase + threadIdx.x] = ic[threadIdx.x];
bibase += RNDVALS;
}
}
int radixcounts(double *a, int n, int digit, unsigned int *bi) {
const dim3 blockdims(RNTHREADS,1,1);
const dim3 griddims(32,1,1);
__radixcounts<<<griddims,blockdims>>>(a, n, digit, bi);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
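// Editorial sizing sketch (hedged, not from the original source): each block
// writes one RNDVALS-bin histogram per RBIGBLK chunk it owns, starting at offset
// RNDVALS*(blockIdx.x + istart/RBIGBLK) and advancing by RNDVALS per chunk, so
// for the 32-block launch above this is a safe upper bound on the length
// (in unsigned ints) of the bi buffer:
static inline long long radixcountsBufLen(int n) {
  return (long long)RNDVALS * (32 + (n + RBIGBLK - 1) / RBIGBLK + 1);
}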
#if __CUDA_ARCH__ > 200
#define GENDISTS(DFNAME,DFUNC) \
__global__ void DFNAME(double *A, int lda, double *B, int ldb, double *C, \
int ldc, int d, int nrows, int ncols, double p) { \
int xblk = blockDim.x * (threadIdx.y + blockIdx.y * blockDim.y); \
int yblk = blockDim.x * (threadIdx.z + blockIdx.z * blockDim.z); \
double va, vb, vc; \
double R00, R01, R02, R03, R04, R05, R06, R07, R08, R09, R10, R11, R12, R13, R14, R15, \
R16, R17, R18, R19, R20, R21, R22, R23, R24, R25, R26, R27, R28, R29, R30, R31; \
int xi = threadIdx.x + xblk; \
int yi = threadIdx.x; \
if (xi < nrows) { \
if (yi+yblk < ncols) {R00 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R01 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R02 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R03 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R04 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R05 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R06 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R07 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R08 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R09 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R10 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R11 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R12 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R13 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R14 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R15 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R16 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R17 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R18 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R19 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R20 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R21 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R22 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R23 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R24 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R25 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R26 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R27 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R28 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R29 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R30 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R31 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
} \
yi = threadIdx.x + yblk; \
int nbr = (threadIdx.x + 1) % blockDim.x; \
for (int i = 0; i < d; i++) { \
va = (xi < nrows) ? A[xi + i * lda] : 0; \
vb = (yi < ncols) ? B[yi + i * ldb] : 0; \
vc=R00; DFUNC; R00=vc; vb=__shfl(vb, nbr); vc=R01; DFUNC; R01=vc; vb=__shfl(vb, nbr); \
vc=R02; DFUNC; R02=vc; vb=__shfl(vb, nbr); vc=R03; DFUNC; R03=vc; vb=__shfl(vb, nbr); \
vc=R04; DFUNC; R04=vc; vb=__shfl(vb, nbr); vc=R05; DFUNC; R05=vc; vb=__shfl(vb, nbr); \
vc=R06; DFUNC; R06=vc; vb=__shfl(vb, nbr); vc=R07; DFUNC; R07=vc; vb=__shfl(vb, nbr); \
vc=R08; DFUNC; R08=vc; vb=__shfl(vb, nbr); vc=R09; DFUNC; R09=vc; vb=__shfl(vb, nbr); \
vc=R10; DFUNC; R10=vc; vb=__shfl(vb, nbr); vc=R11; DFUNC; R11=vc; vb=__shfl(vb, nbr); \
vc=R12; DFUNC; R12=vc; vb=__shfl(vb, nbr); vc=R13; DFUNC; R13=vc; vb=__shfl(vb, nbr); \
vc=R14; DFUNC; R14=vc; vb=__shfl(vb, nbr); vc=R15; DFUNC; R15=vc; vb=__shfl(vb, nbr); \
vc=R16; DFUNC; R16=vc; vb=__shfl(vb, nbr); vc=R17; DFUNC; R17=vc; vb=__shfl(vb, nbr); \
vc=R18; DFUNC; R18=vc; vb=__shfl(vb, nbr); vc=R19; DFUNC; R19=vc; vb=__shfl(vb, nbr); \
vc=R20; DFUNC; R20=vc; vb=__shfl(vb, nbr); vc=R21; DFUNC; R21=vc; vb=__shfl(vb, nbr); \
vc=R22; DFUNC; R22=vc; vb=__shfl(vb, nbr); vc=R23; DFUNC; R23=vc; vb=__shfl(vb, nbr); \
vc=R24; DFUNC; R24=vc; vb=__shfl(vb, nbr); vc=R25; DFUNC; R25=vc; vb=__shfl(vb, nbr); \
vc=R26; DFUNC; R26=vc; vb=__shfl(vb, nbr); vc=R27; DFUNC; R27=vc; vb=__shfl(vb, nbr); \
vc=R28; DFUNC; R28=vc; vb=__shfl(vb, nbr); vc=R29; DFUNC; R29=vc; vb=__shfl(vb, nbr); \
vc=R30; DFUNC; R30=vc; vb=__shfl(vb, nbr); vc=R31; DFUNC; R31=vc; vb=__shfl(vb, nbr); \
} \
yi = threadIdx.x; \
if (xi < nrows) { \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R00;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R01;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R02;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R03;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R04;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R05;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R06;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R07;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R08;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R09;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R10;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R11;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R12;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R13;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R14;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R15;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R16;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R17;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R18;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R19;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R20;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R21;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R22;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R23;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R24;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R25;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R26;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R27;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R28;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R29;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R30;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R31;} yi = (yi+1) % blockDim.x; \
} \
}
GENDISTS(__l1dist,vc+=abs(va-vb))
GENDISTS(__l2dist,vc+=(va-vb)*(va-vb))
GENDISTS(__minkowskidist,vc+=pow(abs(va-vb),p))
GENDISTS(__linfdist,vc=max(vc,abs(va-vb)))
GENDISTS(__msum,vc=max(vc,va+vb))
#else
__global__ void __l1dist(double *A, int lda, double *B, int ldb, double *C, int ldc, int d, int nrows, int ncols, double p) {
printf("Warning, Lidist not supported on arch <= 200\n");
}
__global__ void __l2dist(double *A, int lda, double *B, int ldb, double *C, int ldc, int d, int nrows, int ncols, double p) {
printf("Warning, L2dist not supported on arch <= 200\n");
}
__global__ void __minkowskidist(double *A, int lda, double *B, int ldb, double *C, int ldc, int d, int nrows, int ncols, double p) {
printf("Warning, Minkowski distance not supported on arch <= 200\n");
}
__global__ void __linfdist(double *A, int lda, double *B, int ldb, double *C, int ldc, int d, int nrows, int ncols, double p) {
printf("Warning, Max-abs distance not supported on arch <= 200\n");
}
__global__ void __msum(double *A, int lda, double *B, int ldb, double *C, int ldc, int d, int nrows, int ncols, double p) {
printf("Warning, Max-sum multiply not supported on arch <= 200\n");
}
#endif
int dists(double *A, int lda, double *B, int ldb, double *C, int ldc, int d, int nrows, int ncols, double p) {
dim3 blockdim(32,4,4);
dim3 griddim(1,1+(nrows-1)/128,1+(ncols-1)/128);
// cudaSetDevice(ithread);
if (p == 0.0f) {
__linfdist<<<griddim,blockdim>>>(A, lda, B, ldb, C, ldc, d, nrows, ncols, p);
} else if (p == 1.0f) {
__l1dist<<<griddim,blockdim>>>(A, lda, B, ldb, C, ldc, d, nrows, ncols, p);
} else if (p == 2.0f) {
__l2dist<<<griddim,blockdim>>>(A, lda, B, ldb, C, ldc, d, nrows, ncols, p);
} else {
__minkowskidist<<<griddim,blockdim>>>(A, lda, B, ldb, C, ldc, d, nrows, ncols, p);
}
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
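// Hypothetical usage sketch (assumptions, not from the original source:
// A is nrows x d and B is ncols x d, both column-major; C is nrows x ncols
// with leading dimension ldc = nrows). The GENDISTS kernels load C into
// registers and accumulate into it while rotating B values around the warp
// with __shfl, so C must hold its starting values -- typically zeros:
// cudaMemset(d_C, 0, (size_t)nrows * ncols * sizeof(double));
// dists(d_A, nrows, d_B, ncols, d_C, nrows, d, nrows, ncols, 2.0); // squared L2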
int maxsumx(double *A, int lda, double *B, int ldb, double *C, int ldc, int d, int nrows, int ncols) {
dim3 blockdim(32,4,4);
dim3 griddim(1,1+(nrows-1)/128,1+(ncols-1)/128);
__msum<<<griddim,blockdim>>>(A, lda, B, ldb, C, ldc, d, nrows, ncols, 0);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
#if __CUDA_ARCH__ > 200
template<class T>
__global__ void __cumsumg(T *in, T *out, int *jc, int nrows, int ncols, int m) {
__shared__ T tots[32];
int start, end, ij;
int bid = blockIdx.y + blockIdx.z * gridDim.y; // column index (gridDim.y, not blockDim.y, to match the launch grid in cumsumg)
T sum, tsum, tmp, ttot, ttot0;
if (bid < ncols) {
for (ij = blockIdx.x; ij < m; ij += gridDim.x) {
start = jc[ij] + bid * nrows;
end = jc[ij+1] + bid * nrows;
sum = 0;
for (int i = start + threadIdx.x + threadIdx.y * blockDim.x; i < end; i += blockDim.x * blockDim.y) {
tsum = in[i];
tmp = __shfl_up(tsum, 1);
if (threadIdx.x >= 1) tsum += tmp;
tmp = __shfl_up(tsum, 2);
if (threadIdx.x >= 2) tsum += tmp;
tmp = __shfl_up(tsum, 4);
if (threadIdx.x >= 4) tsum += tmp;
tmp = __shfl_up(tsum, 8);
if (threadIdx.x >= 8) tsum += tmp;
tmp = __shfl_up(tsum, 16);
if (threadIdx.x >= 16) tsum += tmp;
ttot = __shfl(tsum, min(end-start-1, 31));
ttot0 = ttot;
__syncthreads();
if (threadIdx.x == threadIdx.y) {
tots[threadIdx.y] = ttot;
}
__syncthreads();
for (int k = 1; k < blockDim.y; k *= 2) {
if (threadIdx.y >= k) {
if (threadIdx.x == threadIdx.y - k) {
ttot += tots[threadIdx.x];
}
}
__syncthreads();
if (threadIdx.y >= k) {
ttot = __shfl(ttot, threadIdx.y - k);
if (threadIdx.x == threadIdx.y) {
tots[threadIdx.y] = ttot;
}
}
__syncthreads();
}
out[i] = sum + tsum + ttot - ttot0;
if (threadIdx.x == blockDim.y - 1) {
ttot = tots[threadIdx.x];
}
__syncthreads();
ttot = __shfl(ttot, blockDim.y - 1);
sum += ttot;
}
}
}
}
template<class T>
__global__ void __maxming(T *in, T *out, int *outi, int *jc, int nrows, int ncols, int m, T maxminv, int dir) {
__shared__ T maxv[32];
__shared__ int maxi[32];
T vmax, vtmp;
int imax, itmp, i, k, start, end, ij;
int bid = blockIdx.y + blockIdx.z * gridDim.y;
if (bid < ncols) {
for (ij = blockIdx.x; ij < m; ij += gridDim.x) {
vmax = maxminv;
imax = -1;
start = jc[ij];
end = jc[ij+1];
for (i = start + threadIdx.x + threadIdx.y * blockDim.x; i < end; i += blockDim.x * blockDim.y) {
vtmp = in[i + nrows * bid];
itmp = i;
if (dir ? (vtmp > vmax) : (vtmp < vmax)) {
vmax = vtmp;
imax = itmp;
}
}
for (k = 1; k < blockDim.x; k *= 2) {
vtmp = __shfl_up(vmax, k);
itmp = __shfl_up(imax, k);
if (threadIdx.x >= k) {
if (dir ? (vtmp > vmax) : (vtmp < vmax)) {
vmax = vtmp;
imax = itmp;
}
}
}
vmax = __shfl(vmax, blockDim.x - 1);
imax = __shfl(imax, blockDim.x - 1);
__syncthreads();
if (threadIdx.x == threadIdx.y) {
maxv[threadIdx.y] = vmax;
maxi[threadIdx.y] = imax;
}
__syncthreads();
if (threadIdx.y == 0) {
vmax = maxv[threadIdx.x];
imax = maxi[threadIdx.x];
}
__syncthreads();
if (threadIdx.y == 0) {
for (k = 1; k < blockDim.y; k *= 2) {
vtmp = __shfl_up(vmax, k);
itmp = __shfl_up(imax, k);
if (threadIdx.x >= k) {
if (dir ? (vtmp > vmax) : (vtmp < vmax)) {
vmax = vtmp;
imax = itmp;
}
}
}
if (threadIdx.x == blockDim.y - 1) {
out[ij + m * bid] = vmax;
outi[ij + m * bid] = imax;
}
}
}
}
}
template<class T>
__global__ void __maxmini_cols(T *in, T *out, int *outi, int nrows, int ncols, T maxminv, int dir) {
__shared__ T maxv[32];
__shared__ int maxi[32];
T vmax, vtmp;
int imax, itmp, i, k;
int bid = blockIdx.x + blockIdx.y * gridDim.x;
if (bid < ncols) {
vmax = maxminv;
imax = -1;
for (i = threadIdx.x + threadIdx.y * blockDim.x; i < nrows; i += blockDim.x * blockDim.y) {
vtmp = in[i + nrows * bid];
itmp = i;
if (dir ? (vtmp > vmax) : (vtmp < vmax)) {
vmax = vtmp;
imax = itmp;
}
}
for (k = 1; k < blockDim.x; k *= 2) {
vtmp = __shfl_up(vmax, k);
itmp = __shfl_up(imax, k);
if (threadIdx.x >= k) {
if (dir ? (vtmp > vmax) : (vtmp < vmax)) {
vmax = vtmp;
imax = itmp;
}
}
}
vmax = __shfl(vmax, blockDim.x - 1);
imax = __shfl(imax, blockDim.x - 1);
__syncthreads();
if (threadIdx.x == threadIdx.y) {
maxv[threadIdx.y] = vmax;
maxi[threadIdx.y] = imax;
}
__syncthreads();
if (threadIdx.y == 0) {
vmax = maxv[threadIdx.x];
imax = maxi[threadIdx.x];
}
__syncthreads();
if (threadIdx.y == 0) {
for (k = 1; k < blockDim.y; k *= 2) {
vtmp = __shfl_up(vmax, k);
itmp = __shfl_up(imax, k);
if (threadIdx.x >= k) {
if (dir ? (vtmp > vmax) : (vtmp < vmax)) {
vmax = vtmp;
imax = itmp;
}
}
}
if (threadIdx.x == blockDim.y - 1) {
out[bid] = vmax;
outi[bid] = imax;
}
}
__syncthreads();
}
}
// Not very fast for wide matrices
template<class T>
__global__ void __maxmini_rows(T *in, T *out, int *outi, int nrows, int ncols, int dir) {
T vmax, vtmp;
int imax, itmp, i, j;
for (i = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * blockIdx.x); i < nrows; i += blockDim.x * blockDim.y * gridDim.x) {
if (ncols > 0) {
vmax = in[i];
imax = 0;
for (j = 1; j < ncols; j++) {
vtmp = in[i + nrows * j];
itmp = j;
if (dir ? (vtmp > vmax) : (vtmp < vmax)) {
vmax = vtmp;
imax = itmp;
}
}
out[i] = vmax;
outi[i] = imax;
}
}
}
#else
template<class T>
__global__ void __cumsumg(T *in, T *out, int *jc, int nrows, int ncols, int m) {}
template<class T>
__global__ void __maxming(T *in, T *out, int *outi, int *jc, int nrows, int ncols, int m, T minv, int dir) {}
template<class T>
__global__ void __maxmini_cols(T *in, T *out, int *outi, int nrows, int ncols, T minv, int dir) {}
template<class T>
__global__ void __maxmini_rows(T *in, T *out, int *outi, int nrows, int ncols, int dir) {}
#endif
void setindsD(int ncols, int &nc1, int &nc2) {
if (ncols < 65536) {
nc1 = ncols;
nc2 = 1;
} else {
nc1 = (int)sqrt((double)ncols);
nc2 = 1 + (ncols-1)/nc1;
}
}
template<class T>
int cumsumg(T *in, T *out, int *jc, int nrows, int ncols, int m) {
int nc1, nc2;
setindsD(ncols, nc1, nc2);
dim3 grid(min(64, m), nc1, nc2);
int ny = min(32, 1+nrows/m/32);
dim3 tblock(32, ny, 1);
__cumsumg<T><<<grid,tblock>>>(in, out, jc, nrows, ncols, m);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
int cumsumgf(double *in, double *out, int *jc, int nrows, int ncols, int m) {
return cumsumg<double>(in, out, jc, nrows, ncols, m);
}
template<class T>
int maxming(T *in, T *out, int *outi, int *jc, int nrows, int ncols, int m, T minv, int dir) {
int nc1, nc2;
setindsD(ncols, nc1, nc2);
dim3 grid(min(64, m), nc1, nc2);
int ny = min(32, 1+nrows/m/32);
dim3 tblock(32, ny, 1);
__maxming<T><<<grid,tblock>>>(in, out, outi, jc, nrows, ncols, m, minv, dir);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
// JFC: problem here ncols a non-multiple of 16, and nrows < 32.
template<class T>
int maxmini_cols(T *in, T *out, int *outi, int nrows, int ncols, T minv, int dir) {
int nc1, nc2;
setindsD(ncols, nc1, nc2);
dim3 grid(nc1, nc2, 1);
int ny = min(32, 1+nrows/32);
dim3 tblock(32, ny, 1);
__maxmini_cols<T><<<grid,tblock>>>(in, out, outi, nrows, ncols, minv, dir);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
template<class T>
int maxmini_rows(T *in, T *out, int *outi, int nrows, int ncols, int dir) {
int nb = min(32,1+nrows/32);
dim3 grid(nb,1,1);
int ny = min(32, 1+nrows/nb/32);
dim3 tblock(32, ny, 1);
__maxmini_rows<T><<<grid,tblock>>>(in, out, outi, nrows, ncols, dir);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
int maxgf(double *in, double *out, int *outi, int *jc, int nrows, int ncols, int m) {
return maxming<double>(in, out, outi, jc, nrows, ncols, m, -3e38f, 1);
}
int mingf(double *in, double *out, int *outi, int *jc, int nrows, int ncols, int m) {
return maxming<double>(in, out, outi, jc, nrows, ncols, m, 3e38f, 0);
}
int maxif(double *in, double *out, int *outi, int nrows, int ncols, int dir) {
if (dir == 1) {
return maxmini_cols<double>(in, out, outi, nrows, ncols, -3e38f, 1);
} else if (dir == 2) {
return maxmini_rows<double>(in, out, outi, nrows, ncols, 1);
} else {
return -1;
}
}
int minif(double *in, double *out, int *outi, int nrows, int ncols, int dir) {
if (dir == 1) {
return maxmini_cols<double>(in, out, outi, nrows, ncols, 3e38f, 0);
} else if (dir == 2) {
return maxmini_rows<double>(in, out, outi, nrows, ncols, 0);
} else {
return -1;
}
}
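// Hypothetical usage sketch (assumption: column-major nrows x ncols input;
// dir == 1 reduces each column, dir == 2 reduces each row):
// maxif(d_in, d_maxv /* ncols doubles */, d_maxi /* ncols ints */, nrows, ncols, 1);
// minif(d_in, d_minv /* nrows doubles */, d_mini /* nrows ints */, nrows, ncols, 2);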
__global__ void __dmv(double *a, int nrows, int ncols, double *b, double *c) {
for (int tx = threadIdx.x + blockDim.x * blockIdx.x; tx < nrows; tx += blockDim.x * gridDim.x) {
double accum = 0.0;
for (int ty = threadIdx.y + blockDim.y * blockIdx.y; ty < ncols; ty += blockDim.y * gridDim.y) {
accum += a[tx+nrows*ty] * b[ty];
}
atomicAdd(&c[tx], accum);
}
}
#if __CUDA_ARCH__ > 200
__global__ void __dmvt(double *a, int nrows, int ncols, double *b, double *c) {
for (int ty = threadIdx.y + blockDim.y * blockIdx.y; ty < ncols; ty += blockDim.y * gridDim.y) {
double accum = 0.0f;
for (int tx = threadIdx.x + blockDim.x * blockIdx.x; tx < nrows; tx += blockDim.x * gridDim.x) {
accum += a[tx+nrows*ty] * b[tx];
}
for (int i = 1; i < blockDim.x; i *= 2) {
double tmp = __shfl_down(accum, i);
if (threadIdx.x + i < blockDim.x) accum += tmp;
}
if (threadIdx.x == 0) {
atomicAdd(&c[ty], accum);
}
}
}
#else
__global__ void __dmvt(double *a, int nrows, int ncols, double *b, double *c) {
for (int ty = threadIdx.y + blockDim.y * blockIdx.y; ty < ncols; ty += blockDim.y * gridDim.y) {
double accum = 0.0;
for (int tx = threadIdx.x + blockDim.x * blockIdx.x; tx < nrows; tx += blockDim.x * gridDim.x) {
accum += a[tx+nrows*ty] * b[tx];
}
atomicAdd(&c[ty], accum);
}
}
#endif
__global__ void __dmv0(double *a, int nrows, int ncols, int tstep, double *b, double *c) {
double accum = 0.0f;
int tx = threadIdx.x + blockDim.x * blockIdx.x;
if (tx < tstep) {
for (; tx < nrows*ncols; tx += tstep) {
int icol = tx / nrows;
accum += a[tx] * b[icol];
}
int irow = tx % nrows;
atomicAdd(&c[irow], accum);
}
}
int dmv(double *a, int nrows, int ncols, double *b, double *c, int trans) {
if (trans == 1) {
int ntx = min(32, nrows);
int nty = min(32, ncols);
int nbx = min(256, 1 + nrows/ntx/8);
int nby = min(256, 1 + ncols/nty/2);
dim3 blockdims(ntx,nty,1);
dim3 griddims(nbx,nby,1);
__dmvt<<<griddims,blockdims>>>(a, nrows, ncols, b, c);
} else {
int ntx = min(1024, nrows*ncols);
int nbx = max(1+(nrows-1)/ntx, nrows*ncols/ntx/32);
int tstep = (ntx*nbx/nrows)*nrows;
__dmv0<<<nbx,ntx>>>(a, nrows, ncols, tstep, b, c);
}
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
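// Hypothetical usage sketch (assumption, not from the original source): both
// branches accumulate into c with atomicAdd, so c must be zeroed beforehand:
// cudaMemset(d_c, 0, nrows * sizeof(double));
// dmv(d_a, nrows, ncols, d_b, d_c, 0);   // c += A * b
// cudaMemset(d_c, 0, ncols * sizeof(double));
// dmv(d_a, nrows, ncols, d_b, d_c, 1);   // c += A^T * b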
#define ACCUM_KERNEL(TI,TJ,TV,TS,II,IJ,IV) \
__global__ void __accum(TI, TJ, TV, TS, int m, int nrows) { \
int istart = ((int)(((long long)blockIdx.x) * m / gridDim.x)); \
int iend = ((int)(((long long)blockIdx.x + 1) * m / gridDim.x)); \
istart = (istart / 32) * 32; \
if (blockIdx.x != gridDim.x - 1) { \
iend = (iend / 32) * 32; \
} \
for (int i = istart + threadIdx.x; i < iend; i+= blockDim.x) { \
atomicAdd(&S[II + nrows * IJ], IV); \
} \
} \
int accum(TI, TJ, TV, TS, int m, int nrows) { \
int nthreads = max(32, min(512, m)); \
int nblocks = max(1, min(65535, m/nthreads/8)); \
__accum<<<nblocks,nthreads>>>(I,J,V,S,m,nrows); \
cudaStreamSynchronize(SYNC_STREAM); \
cudaError_t err = cudaGetLastError(); \
return err; \
}
ACCUM_KERNEL(int*I, int*J, double*V, double*S, I[i], J[i], V[i])
ACCUM_KERNEL(int*I, int J, double*V, double*S, I[i], J, V[i])
ACCUM_KERNEL(int I, int*J, double*V, double*S, I, J[i], V[i])
ACCUM_KERNEL(int*I, int*J, double V, double*S, I[i], J[i], V)
ACCUM_KERNEL(int*I, int J, double V, double*S, I[i], J, V)
ACCUM_KERNEL(int I, int*J, double V, double*S, I, J[i], V)
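// Hypothetical usage sketch (assumption: S is a dense nrows x ncols
// column-major accumulator; the kernels scatter-add with atomicAdd, so S
// should start from the desired base values, typically zeros):
// cudaMemset(d_S, 0, (size_t)nrows * ncols * sizeof(double));
// accum(d_I, d_J, d_V, d_S, m, nrows);   // S[I[i], J[i]] += V[i] for i < m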
const int INBLOCK = 4;
// copy and transpose columns of the input matrix into the output matrix. nrows refers to the input matrix
// (and so is ncols for the output). ncols is the length of the iptrs array, which will be the number of
// rows of the output matrix. iptrs specifies the columns of the input array to copy.
// outstride is stride of the output matrix
__global__ void __icopy_transpose(int *iptrs, double *in, double *out, int outstride, int nrows, int ncols) {
__shared__ double tile[BLOCKDIM][BLOCKDIM+1];
int nx = BLOCKDIM * gridDim.x;
int ny = BLOCKDIM * gridDim.y;
int ix = BLOCKDIM * blockIdx.x;
int iy = BLOCKDIM * blockIdx.y;
for (int yb = iy; yb < ncols; yb += ny) {
for (int xb = ix; xb < nrows; xb += nx) {
if (xb + threadIdx.x < nrows) {
int ylim = min(ncols, yb + BLOCKDIM);
for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) {
tile[threadIdx.x][y-yb] = in[threadIdx.x + xb + iptrs[y]*nrows];
}
}
__syncthreads();
if (yb + threadIdx.x < ncols) {
int xlim = min(nrows, xb + BLOCKDIM);
for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) {
out[threadIdx.x + yb + x*outstride] = tile[x-xb][threadIdx.x];
}
}
__syncthreads();
}
}
}
int icopy_transpose(int *iptrs, double *in, double *out, int stride, int nrows, int ncols) {
const dim3 griddims(20,256,1);
const dim3 blockdims(BLOCKDIM,INBLOCK,1);
cudaError_t err;
__icopy_transpose<<<griddims,blockdims>>>(iptrs, in, out, stride, nrows, ncols);
cudaStreamSynchronize(SYNC_STREAM);
err = cudaGetLastError();
if (err != cudaSuccess) {fprintf(stderr, "cuda error in icopy_transpose"); return err;}
return 0;
}
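// Hypothetical usage sketch (assumption, not from the original source): with
// in an nrows x N column-major matrix and iptrs holding ncols column indices,
// out receives those columns transposed as an ncols x nrows matrix, so the
// output leading dimension must satisfy stride >= ncols:
// icopy_transpose(d_iptrs, d_in, d_out, ncols, nrows, ncols);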
// copy and transpose the input matrix into columns of the output matrix. nrows, ncols refer to output matrix
__global__ void __ocopy_transpose(int *optrs, double *in, double *out, int instride, int nrows, int ncols) {
int nx = BLOCKDIM * gridDim.x;
int ny = BLOCKDIM * gridDim.y;
int ix = BLOCKDIM * blockIdx.x;
int iy = BLOCKDIM * blockIdx.y;
__shared__ double tile[BLOCKDIM][BLOCKDIM+1];
for (int yb = iy; yb < ncols; yb += ny) {
for (int xb = ix; xb < nrows; xb += nx) {
if (yb + threadIdx.x < ncols) {
int xlim = min(nrows, xb + BLOCKDIM);
for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) {
tile[x-xb][threadIdx.x] = in[threadIdx.x + yb + x*instride];
}
}
__syncthreads();
if (xb + threadIdx.x < nrows) {
int ylim = min(ncols, yb + BLOCKDIM);
for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) {
out[optrs[y]*nrows + threadIdx.x + xb] = tile[threadIdx.x][y-yb];
}
}
__syncthreads();
}
}
}
__global__ void __ocopy_transpose_add(int *optrs, double *in, double *out, int instride, int nrows, int ncols) {
int nx = BLOCKDIM * gridDim.x;
int ny = BLOCKDIM * gridDim.y;
int ix = BLOCKDIM * blockIdx.x;
int iy = BLOCKDIM * blockIdx.y;
__shared__ double tile[BLOCKDIM][BLOCKDIM+1];
for (int yb = iy; yb < ncols; yb += ny) {
for (int xb = ix; xb < nrows; xb += nx) {
if (yb + threadIdx.x < ncols) {
int xlim = min(nrows, xb + BLOCKDIM);
for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) {
tile[x-xb][threadIdx.x] = in[threadIdx.x + yb + x*instride];
}
}
__syncthreads();
if (xb + threadIdx.x < nrows) {
int ylim = min(ncols, yb + BLOCKDIM);
for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) {
atomicAdd(&out[optrs[y]*nrows + threadIdx.x + xb], tile[threadIdx.x][y-yb]);
}
}
__syncthreads();
}
}
}
__global__ void __ocopy_transpose_min(int *optrs, double *in, double *out, int instride, int nrows, int ncols) {
int nx = BLOCKDIM * gridDim.x;
int ny = BLOCKDIM * gridDim.y;
int ix = BLOCKDIM * blockIdx.x;
int iy = BLOCKDIM * blockIdx.y;
__shared__ double tile[BLOCKDIM][BLOCKDIM+1];
for (int yb = iy; yb < ncols; yb += ny) {
for (int xb = ix; xb < nrows; xb += nx) {
if (yb + threadIdx.x < ncols) {
int xlim = min(nrows, xb + BLOCKDIM);
for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) {
tile[x-xb][threadIdx.x] = in[threadIdx.x + yb + x*instride];
}
}
__syncthreads();
if (xb + threadIdx.x < nrows) {
int ylim = min(ncols, yb + BLOCKDIM);
for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) {
atomicMin((int *)&out[optrs[y]*nrows + threadIdx.x + xb], *(int *)(&tile[threadIdx.x][y-yb]));
}
}
__syncthreads();
}
}
}
int ocopy_transpose_add(int *optrs, double *in, double *out, int stride, int nrows, int ncols) {
const dim3 griddims(20,256,1);
const dim3 blockdims(BLOCKDIM,INBLOCK,1);
cudaError_t err;
__ocopy_transpose_add<<<griddims,blockdims>>>(optrs, in, out, stride, nrows, ncols);
cudaStreamSynchronize(SYNC_STREAM);
err = cudaGetLastError();
if (err != cudaSuccess) {fprintf(stderr, "cuda error in ocopy_transpose_add"); return err;}
return 0;
}
int ocopy_transpose(int *optrs, double *in, double *out, int stride, int nrows, int ncols) {
const dim3 griddims(20,256,1);
const dim3 blockdims(BLOCKDIM,INBLOCK,1);
cudaError_t err;
__ocopy_transpose<<<griddims,blockdims>>>(optrs, in, out, stride, nrows, ncols);
cudaStreamSynchronize(SYNC_STREAM);
err = cudaGetLastError();
if (err != cudaSuccess) {fprintf(stderr, "cuda error in ocopy_transpose"); return err;}
return 0;
}
int ocopy_transpose_min(int *optrs, double *in, double *out, int stride, int nrows, int ncols) {
const dim3 griddims(20,256,1);
const dim3 blockdims(BLOCKDIM,INBLOCK,1);
cudaError_t err;
__ocopy_transpose_min<<<griddims,blockdims>>>(optrs, in, out, stride, nrows, ncols);
cudaStreamSynchronize(SYNC_STREAM);
err = cudaGetLastError();
if (err != cudaSuccess) {fprintf(stderr, "cuda error in ocopy_transpose_min"); return err;}
return 0;
}
#ifdef TEST
int main(int argc, char **argv) {
int m=8, n=8, opn = 0;
double *dA, *dB, *dC, *A, *B, *C;
if (argc > 1) {
sscanf(argv[1], "%d", &opn);
if (argc > 2) {
sscanf(argv[2], "%d", &m);
if (argc > 3) {
sscanf(argv[3], "%d", &n);
}
}
}
A = (double *)malloc(m*n*sizeof(double));
B = (double *)malloc(m*n*sizeof(double));
C = (double *)malloc(m*n*sizeof(double));
cudaMalloc((void**)&dA, m*n*sizeof(double));
cudaMalloc((void**)&dB, m*n*sizeof(double));
cudaMalloc((void**)&dC, m*n*sizeof(double));
for (int i = 0; i < m*n; i++) {
A[i] = 1.0f;
B[i] = 2.0f;
}
cudaMemcpy(dA, A, m*n*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dB, B, m*n*sizeof(double), cudaMemcpyHostToDevice);
printf("A %f %f %f %f\n", A[0], A[1], A[2], A[3]);
printf("B %f %f %f %f\n", B[0], B[1], B[2], B[3]);
MatKernel(dA, m, n, dB, m, n, dC, opn);
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err) {
fprintf(stderr, "CUDA error %d", err);
exit(1);
}
cudaMemcpy(C, dC, m*n*sizeof(double), cudaMemcpyDeviceToHost);
printf("C %f %f %f %f\n", C[0], C[1], C[2], C[3]);
printf("A %f %f %f %f\n", A[0], A[1], A[2], A[3]);
printf("B %f %f %f %f\n", B[0], B[1], B[2], B[3]);
if (dA != NULL) cudaFree(dA);
if (dB != NULL) cudaFree(dB);
if (dC != NULL) cudaFree(dC);
if (C != NULL) free(C);
}
#endif
// Cumulative sum of columns
#if __CUDA_ARCH__ >= 300
__global__ void __cumsumc(int nrows, int ncols, double *A, double *B) {
int i, j, k, lim;
double v, w, sum;
int icol = threadIdx.y + blockDim.y * blockIdx.x;
__syncthreads();
for (i = icol; i < ncols; i += blockDim.y * gridDim.x) {
sum = 0.0f;
for (j = 0; j < nrows; j += blockDim.x) {
v = 0;
if (j + threadIdx.x < nrows) {
v = A[j + threadIdx.x + i * nrows];
}
lim = min(blockDim.x, nrows - j);
#pragma unroll
for (k = 1; k < lim; k = k + k) {
w = __shfl_up(v, k);
if (threadIdx.x >= k) {
v += w;
}
}
v += sum;
if (j + threadIdx.x < nrows) {
B[j + threadIdx.x + i * nrows] = v;
}
sum = __shfl(v, blockDim.x - 1);
}
}
}
#else
__global__ void __cumsumc(int nrows, int ncols, double *A, double *B) {
__shared__ double buff[32];
int i, j, k, lim;
double v, sum;
int icol = threadIdx.y + blockDim.y * blockIdx.x;
__syncthreads();
for (i = icol; i < ncols; i += blockDim.y * gridDim.x) {
sum = 0.0f;
for (j = 0; j < nrows; j += blockDim.x) {
v = 0;
if (j + threadIdx.x < nrows) {
v = A[j + threadIdx.x + i * nrows];
}
__syncthreads();
buff[threadIdx.x] = v;
lim = min(blockDim.x, nrows - j);
#pragma unroll
for (k = 1; k < lim; k = k + k) {
__syncthreads();
if (threadIdx.x >= k) {
v += buff[threadIdx.x - k];
}
__syncthreads();
buff[threadIdx.x] = v;
}
v += sum;
if (j + threadIdx.x < nrows) {
B[j + threadIdx.x + i * nrows] = v;
}
__syncthreads();
sum = buff[31];
__syncthreads();
}
}
}
#endif
int cumsumc(int nrows, int ncols, double *A, double *B) {
if (ncols == 1) {
thrust::device_ptr<double> pa(A);
thrust::device_ptr<double> pb(B);
thrust::inclusive_scan(pa, pa + nrows, pb);
} else {
dim3 threads;
threads.x = 32;
threads.y = min(32, ncols);
int nblocks = min(64, 1 + (ncols-1)/threads.y);
__cumsumc<<<nblocks,threads>>>(nrows, ncols, A, B);
}
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
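// Hypothetical usage sketch (assumption: column-major nrows x ncols input;
// each output column holds the inclusive prefix sum of the matching input
// column, with the ncols == 1 case delegated to thrust::inclusive_scan):
// cumsumc(nrows, ncols, d_A, d_B);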
int inclusive_scan_by_key_dd(double *fvals, double *fkeys, double *fout, long long len) {
thrust::device_ptr<double> vals(fvals);
thrust::device_ptr<double> keys(fkeys);
thrust::device_ptr<double> out(fout);
thrust::inclusive_scan_by_key(keys, keys+len, vals, out);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
int inclusive_scan_by_key_ll(long long *fvals, long long *fkeys, long long *fout, long long len) {
thrust::device_ptr<long long> vals(fvals);
thrust::device_ptr<long long> keys(fkeys);
thrust::device_ptr<long long> out(fout);
thrust::inclusive_scan_by_key(keys, keys+len, vals, out);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
int reverse(double *fvals, double *fout, long long len) {
thrust::device_ptr<double> vals(fvals);
thrust::device_ptr<double> out(fout);
thrust::reverse_copy(vals, vals+len, out);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
|
85bd66b53f26a91e19f101f25c636cb4ebe06c8d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
__device__ double doBinomial(int n, double p,double *randomNumbers, hiprandGenerator_t s) {
int x = 0;
int tid = threadIdx.x + blockIdx.x * blockDim.x;
for(int i = tid; i < n; i++) {
if(randomNumbers[i] < p )
x++;
}
return x;
}
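// Editorial note (hedged): the scan starts at this thread's global index, so
// larger tids count successes over a shorter suffix of randomNumbers; the
// integer count is returned widened to double, and the generator handle s is
// accepted but unused here.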
extern "C"
__global__ void binomial_scalar_double(int len,int n,double ps,double *randomNumbers,double *result, hiprandGenerator_t s) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
for(int i = tid; i < len; i += blockDim.x*gridDim.x) {
result[i] = doBinomial(n,ps,randomNumbers,s);
}
}
|
85bd66b53f26a91e19f101f25c636cb4ebe06c8d.cu
|
#include <cuda_runtime.h>
#include <curand.h>
__device__ double doBinomial(int n, double p,double *randomNumbers, curandGenerator_t s) {
int x = 0;
int tid = threadIdx.x + blockIdx.x * blockDim.x;
for(int i = tid; i < n; i++) {
if(randomNumbers[i] < p )
x++;
}
return x;
}
extern "C"
__global__ void binomial_scalar_double(int len,int n,double ps,double *randomNumbers,double *result, curandGenerator_t s) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
for(int i = tid; i < len; i += blockDim.x*gridDim.x) {
result[i] = doBinomial(n,ps,randomNumbers,s);
}
}
|
e55e33158c81de2cba7e121788e8ccbda7348923.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by op2.py
//
//user function
__device__
inline void res_calc_gpu(const double *x1, const double *x2, const double *q1, const double *q2,
const double *adt1, const double *adt2, double *res1, double *res2) {
double dx,dy,mu, ri, p1,vol1, p2,vol2, f;
dx = x1[0] - x2[0];
dy = x1[1] - x2[1];
ri = 1.0f/q1[0];
p1 = gm1*(q1[3]-0.5f*ri*(q1[1]*q1[1]+q1[2]*q1[2]));
vol1 = ri*(q1[1]*dy - q1[2]*dx);
ri = 1.0f/q2[0];
p2 = gm1*(q2[3]-0.5f*ri*(q2[1]*q2[1]+q2[2]*q2[2]));
vol2 = ri*(q2[1]*dy - q2[2]*dx);
mu = 0.5f*((*adt1)+(*adt2))*eps;
f = 0.5f*(vol1* q1[0] + vol2* q2[0] ) + mu*(q1[0]-q2[0]);
res1[0] += f;
res2[0] -= f;
f = 0.5f*(vol1* q1[1] + p1*dy + vol2* q2[1] + p2*dy) + mu*(q1[1]-q2[1]);
res1[1] += f;
res2[1] -= f;
f = 0.5f*(vol1* q1[2] - p1*dx + vol2* q2[2] - p2*dx) + mu*(q1[2]-q2[2]);
res1[2] += f;
res2[2] -= f;
f = 0.5f*(vol1*(q1[3]+p1) + vol2*(q2[3]+p2) ) + mu*(q1[3]-q2[3]);
res1[3] += f;
res2[3] -= f;
}
// CUDA kernel function
__global__ void op_cuda_res_calc(
const double *__restrict ind_arg0,
const double *__restrict ind_arg1,
const double *__restrict ind_arg2,
double *__restrict ind_arg3,
const int *__restrict opDat0Map,
const int *__restrict opDat2Map,
int block_offset,
int *blkmap,
int *offset,
int *nelems,
int *ncolors,
int *colors,
int nblocks,
int set_size) {
double arg6_l[4];
double arg7_l[4];
__shared__ int nelems2, ncolor;
__shared__ int nelem, offset_b;
extern __shared__ char shared[];
if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) {
return;
}
if (threadIdx.x==0) {
//get sizes and shift pointers and direct-mapped data
int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset];
nelem = nelems[blockId];
offset_b = offset[blockId];
nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x);
ncolor = ncolors[blockId];
}
__syncthreads(); // make sure all of above completed
for ( int n=threadIdx.x; n<nelems2; n+=blockDim.x ){
int col2 = -1;
int map0idx;
int map1idx;
int map2idx;
int map3idx;
if (n<nelem) {
//initialise local variables
for ( int d=0; d<4; d++ ){
arg6_l[d] = ZERO_double;
}
for ( int d=0; d<4; d++ ){
arg7_l[d] = ZERO_double;
}
map0idx = opDat0Map[n + offset_b + set_size * 0];
map1idx = opDat0Map[n + offset_b + set_size * 1];
map2idx = opDat2Map[n + offset_b + set_size * 0];
map3idx = opDat2Map[n + offset_b + set_size * 1];
//user-supplied kernel call
res_calc_gpu(ind_arg0+map0idx*2,
ind_arg0+map1idx*2,
ind_arg1+map2idx*4,
ind_arg1+map3idx*4,
ind_arg2+map2idx*1,
ind_arg2+map3idx*1,
arg6_l,
arg7_l);
col2 = colors[n+offset_b];
}
//store local variables
for ( int col=0; col<ncolor; col++ ){
if (col2==col) {
arg6_l[0] += ind_arg3[0+map2idx*4];
arg6_l[1] += ind_arg3[1+map2idx*4];
arg6_l[2] += ind_arg3[2+map2idx*4];
arg6_l[3] += ind_arg3[3+map2idx*4];
arg7_l[0] += ind_arg3[0+map3idx*4];
arg7_l[1] += ind_arg3[1+map3idx*4];
arg7_l[2] += ind_arg3[2+map3idx*4];
arg7_l[3] += ind_arg3[3+map3idx*4];
ind_arg3[0+map2idx*4] = arg6_l[0];
ind_arg3[1+map2idx*4] = arg6_l[1];
ind_arg3[2+map2idx*4] = arg6_l[2];
ind_arg3[3+map2idx*4] = arg6_l[3];
ind_arg3[0+map3idx*4] = arg7_l[0];
ind_arg3[1+map3idx*4] = arg7_l[1];
ind_arg3[2+map3idx*4] = arg7_l[2];
ind_arg3[3+map3idx*4] = arg7_l[3];
}
__syncthreads();
}
}
}
//GPU host stub function
void op_par_loop_res_calc_gpu(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5,
op_arg arg6,
op_arg arg7){
int nargs = 8;
op_arg args[8];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
args[3] = arg3;
args[4] = arg4;
args[5] = arg5;
args[6] = arg6;
args[7] = arg7;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(2);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[2].name = name;
OP_kernels[2].count += 1;
if (OP_kernels[2].count==1) op_register_strides();
int ninds = 4;
int inds[8] = {0,0,1,1,2,2,3,3};
if (OP_diags>2) {
printf(" kernel routine with indirection: res_calc\n");
}
//get plan
#ifdef OP_PART_SIZE_2
int part_size = OP_PART_SIZE_2;
#else
int part_size = OP_part_size;
#endif
int set_size = op_mpi_halo_exchanges_cuda(set, nargs, args);
if (set->size > 0) {
op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds);
//execute plan
int block_offset = 0;
for ( int col=0; col<Plan->ncolors; col++ ){
if (col==Plan->ncolors_core) {
op_mpi_wait_all_cuda(nargs, args);
}
#ifdef OP_BLOCK_SIZE_2
int nthread = OP_BLOCK_SIZE_2;
#else
int nthread = OP_block_size;
#endif
dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col],
Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1);
if (Plan->ncolblk[col] > 0) {
hipLaunchKernelGGL(( op_cuda_res_calc), dim3(nblocks),dim3(nthread), 0, 0,
(double *)arg0.data_d,
(double *)arg2.data_d,
(double *)arg4.data_d,
(double *)arg6.data_d,
arg0.map_data_d,
arg2.map_data_d,
block_offset,
Plan->blkmap,
Plan->offset,
Plan->nelems,
Plan->nthrcol,
Plan->thrcol,
Plan->ncolblk[col],
set->size+set->exec_size);
}
block_offset += Plan->ncolblk[col];
}
OP_kernels[2].transfer += Plan->transfer;
OP_kernels[2].transfer2 += Plan->transfer2;
}
op_mpi_set_dirtybit_cuda(nargs, args);
cutilSafeCall(hipDeviceSynchronize());
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[2].time += wall_t2 - wall_t1;
}
void op_par_loop_res_calc_cpu(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5,
op_arg arg6,
op_arg arg7);
//GPU host stub function
#if OP_HYBRID_GPU
void op_par_loop_res_calc(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5,
op_arg arg6,
op_arg arg7){
if (OP_hybrid_gpu) {
op_par_loop_res_calc_gpu(name, set,
arg0,
arg1,
arg2,
arg3,
arg4,
arg5,
arg6,
arg7);
}else{
op_par_loop_res_calc_cpu(name, set,
arg0,
arg1,
arg2,
arg3,
arg4,
arg5,
arg6,
arg7);
}
}
#else
void op_par_loop_res_calc(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5,
op_arg arg6,
op_arg arg7){
op_par_loop_res_calc_gpu(name, set,
arg0,
arg1,
arg2,
arg3,
arg4,
arg5,
arg6,
arg7);
}
#endif //OP_HYBRID_GPU
|
e55e33158c81de2cba7e121788e8ccbda7348923.cu
|
//
// auto-generated by op2.py
//
//user function
__device__
inline void res_calc_gpu(const double *x1, const double *x2, const double *q1, const double *q2,
const double *adt1, const double *adt2, double *res1, double *res2) {
double dx,dy,mu, ri, p1,vol1, p2,vol2, f;
dx = x1[0] - x2[0];
dy = x1[1] - x2[1];
ri = 1.0f/q1[0];
p1 = gm1*(q1[3]-0.5f*ri*(q1[1]*q1[1]+q1[2]*q1[2]));
vol1 = ri*(q1[1]*dy - q1[2]*dx);
ri = 1.0f/q2[0];
p2 = gm1*(q2[3]-0.5f*ri*(q2[1]*q2[1]+q2[2]*q2[2]));
vol2 = ri*(q2[1]*dy - q2[2]*dx);
mu = 0.5f*((*adt1)+(*adt2))*eps;
f = 0.5f*(vol1* q1[0] + vol2* q2[0] ) + mu*(q1[0]-q2[0]);
res1[0] += f;
res2[0] -= f;
f = 0.5f*(vol1* q1[1] + p1*dy + vol2* q2[1] + p2*dy) + mu*(q1[1]-q2[1]);
res1[1] += f;
res2[1] -= f;
f = 0.5f*(vol1* q1[2] - p1*dx + vol2* q2[2] - p2*dx) + mu*(q1[2]-q2[2]);
res1[2] += f;
res2[2] -= f;
f = 0.5f*(vol1*(q1[3]+p1) + vol2*(q2[3]+p2) ) + mu*(q1[3]-q2[3]);
res1[3] += f;
res2[3] -= f;
}
// CUDA kernel function
__global__ void op_cuda_res_calc(
const double *__restrict ind_arg0,
const double *__restrict ind_arg1,
const double *__restrict ind_arg2,
double *__restrict ind_arg3,
const int *__restrict opDat0Map,
const int *__restrict opDat2Map,
int block_offset,
int *blkmap,
int *offset,
int *nelems,
int *ncolors,
int *colors,
int nblocks,
int set_size) {
double arg6_l[4];
double arg7_l[4];
__shared__ int nelems2, ncolor;
__shared__ int nelem, offset_b;
extern __shared__ char shared[];
if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) {
return;
}
if (threadIdx.x==0) {
//get sizes and shift pointers and direct-mapped data
int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset];
nelem = nelems[blockId];
offset_b = offset[blockId];
nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x);
ncolor = ncolors[blockId];
}
__syncthreads(); // make sure all of above completed
for ( int n=threadIdx.x; n<nelems2; n+=blockDim.x ){
int col2 = -1;
int map0idx;
int map1idx;
int map2idx;
int map3idx;
if (n<nelem) {
//initialise local variables
for ( int d=0; d<4; d++ ){
arg6_l[d] = ZERO_double;
}
for ( int d=0; d<4; d++ ){
arg7_l[d] = ZERO_double;
}
map0idx = opDat0Map[n + offset_b + set_size * 0];
map1idx = opDat0Map[n + offset_b + set_size * 1];
map2idx = opDat2Map[n + offset_b + set_size * 0];
map3idx = opDat2Map[n + offset_b + set_size * 1];
//user-supplied kernel call
res_calc_gpu(ind_arg0+map0idx*2,
ind_arg0+map1idx*2,
ind_arg1+map2idx*4,
ind_arg1+map3idx*4,
ind_arg2+map2idx*1,
ind_arg2+map3idx*1,
arg6_l,
arg7_l);
col2 = colors[n+offset_b];
}
//store local variables
for ( int col=0; col<ncolor; col++ ){
if (col2==col) {
arg6_l[0] += ind_arg3[0+map2idx*4];
arg6_l[1] += ind_arg3[1+map2idx*4];
arg6_l[2] += ind_arg3[2+map2idx*4];
arg6_l[3] += ind_arg3[3+map2idx*4];
arg7_l[0] += ind_arg3[0+map3idx*4];
arg7_l[1] += ind_arg3[1+map3idx*4];
arg7_l[2] += ind_arg3[2+map3idx*4];
arg7_l[3] += ind_arg3[3+map3idx*4];
ind_arg3[0+map2idx*4] = arg6_l[0];
ind_arg3[1+map2idx*4] = arg6_l[1];
ind_arg3[2+map2idx*4] = arg6_l[2];
ind_arg3[3+map2idx*4] = arg6_l[3];
ind_arg3[0+map3idx*4] = arg7_l[0];
ind_arg3[1+map3idx*4] = arg7_l[1];
ind_arg3[2+map3idx*4] = arg7_l[2];
ind_arg3[3+map3idx*4] = arg7_l[3];
}
__syncthreads();
}
}
}
//GPU host stub function
void op_par_loop_res_calc_gpu(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5,
op_arg arg6,
op_arg arg7){
int nargs = 8;
op_arg args[8];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
args[3] = arg3;
args[4] = arg4;
args[5] = arg5;
args[6] = arg6;
args[7] = arg7;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(2);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[2].name = name;
OP_kernels[2].count += 1;
if (OP_kernels[2].count==1) op_register_strides();
int ninds = 4;
int inds[8] = {0,0,1,1,2,2,3,3};
if (OP_diags>2) {
printf(" kernel routine with indirection: res_calc\n");
}
//get plan
#ifdef OP_PART_SIZE_2
int part_size = OP_PART_SIZE_2;
#else
int part_size = OP_part_size;
#endif
int set_size = op_mpi_halo_exchanges_cuda(set, nargs, args);
if (set->size > 0) {
op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds);
//execute plan
int block_offset = 0;
for ( int col=0; col<Plan->ncolors; col++ ){
if (col==Plan->ncolors_core) {
op_mpi_wait_all_cuda(nargs, args);
}
#ifdef OP_BLOCK_SIZE_2
int nthread = OP_BLOCK_SIZE_2;
#else
int nthread = OP_block_size;
#endif
dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col],
Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1);
if (Plan->ncolblk[col] > 0) {
op_cuda_res_calc<<<nblocks,nthread>>>(
(double *)arg0.data_d,
(double *)arg2.data_d,
(double *)arg4.data_d,
(double *)arg6.data_d,
arg0.map_data_d,
arg2.map_data_d,
block_offset,
Plan->blkmap,
Plan->offset,
Plan->nelems,
Plan->nthrcol,
Plan->thrcol,
Plan->ncolblk[col],
set->size+set->exec_size);
}
block_offset += Plan->ncolblk[col];
}
OP_kernels[2].transfer += Plan->transfer;
OP_kernels[2].transfer2 += Plan->transfer2;
}
op_mpi_set_dirtybit_cuda(nargs, args);
cutilSafeCall(cudaDeviceSynchronize());
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[2].time += wall_t2 - wall_t1;
}
void op_par_loop_res_calc_cpu(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5,
op_arg arg6,
op_arg arg7);
//GPU host stub function
#if OP_HYBRID_GPU
void op_par_loop_res_calc(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5,
op_arg arg6,
op_arg arg7){
if (OP_hybrid_gpu) {
op_par_loop_res_calc_gpu(name, set,
arg0,
arg1,
arg2,
arg3,
arg4,
arg5,
arg6,
arg7);
}else{
op_par_loop_res_calc_cpu(name, set,
arg0,
arg1,
arg2,
arg3,
arg4,
arg5,
arg6,
arg7);
}
}
#else
void op_par_loop_res_calc(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5,
op_arg arg6,
op_arg arg7){
op_par_loop_res_calc_gpu(name, set,
arg0,
arg1,
arg2,
arg3,
arg4,
arg5,
arg6,
arg7);
}
#endif //OP_HYBRID_GPU
|
c5d2cb4d87539061993530bed445e45b2c3acc9a.hip
|
// !!! This is a file automatically generated by hipify!!!
#ifndef CORNERTURN_KERNEL_H_
#define CORNERTURN_KERNEL_H_
#include <hip/hip_runtime.h>
#include "AstroAccelerate/params.h"
//{{{ corner_turn
__global__ void simple_corner_turn_kernel(unsigned short *d_input, float *d_output, int nchans, int nsamp)
{
int t = blockIdx.x * blockDim.x + threadIdx.x;
int c = blockIdx.y * blockDim.y + threadIdx.y;
d_output[c * nsamp + t] = (float) __ldg(&d_input[t * nchans + c]);
}
__global__ void swap(unsigned short *d_input, float *d_output, int nchans, int nsamp)
{
int t = blockIdx.x * blockDim.x + threadIdx.x;
int c = blockIdx.y * blockDim.y + threadIdx.y;
d_input[c * nsamp + t] = (unsigned short) __ldg(&d_output[c * nsamp + t]);
}
//}}}
#endif
|
c5d2cb4d87539061993530bed445e45b2c3acc9a.cu
|
#ifndef CORNERTURN_KERNEL_H_
#define CORNERTURN_KERNEL_H_
#include <cuda.h>
#include <cuda_runtime.h>
#include "AstroAccelerate/params.h"
//{{{ corner_turn
__global__ void simple_corner_turn_kernel(unsigned short *d_input, float *d_output, int nchans, int nsamp)
{
int t = blockIdx.x * blockDim.x + threadIdx.x;
int c = blockIdx.y * blockDim.y + threadIdx.y;
d_output[c * nsamp + t] = (float) __ldg(&d_input[t * nchans + c]);
}
__global__ void swap(unsigned short *d_input, float *d_output, int nchans, int nsamp)
{
int t = blockIdx.x * blockDim.x + threadIdx.x;
int c = blockIdx.y * blockDim.y + threadIdx.y;
d_input[c * nsamp + t] = (unsigned short) __ldg(&d_output[c * nsamp + t]);
}
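// Hypothetical launch helper (editorial sketch, not from the original header;
// assumes nsamp and nchans are exact multiples of the block dimensions, since
// the kernels above have no bounds checks):
static inline void launch_corner_turn(unsigned short *d_input, float *d_output, int nchans, int nsamp)
{
	dim3 block(32, 8);
	dim3 grid(nsamp / block.x, nchans / block.y);
	simple_corner_turn_kernel<<<grid, block>>>(d_input, d_output, nchans, nsamp);
}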
//}}}
#endif
|
510289562ccc3895ccc8791b087f62d82a219326.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../shared/timer.hpp"
#include "../shared/tigr_utilities.hpp"
#include "../shared/graph.hpp"
#include "../shared/virtual_graph.hpp"
#include "../shared/globals.hpp"
#include "../shared/argument_parsing.hpp"
#include "../shared/gpu_error_check.cuh"
__global__ void kernel(unsigned int numParts,
unsigned int *nodePointer,
PartPointer *partNodePointer,
unsigned int *edgeList,
unsigned int *dist,
bool *finished,
int level)
{
unsigned int partId = blockDim.x * blockIdx.x + threadIdx.x;
if(partId < numParts)
{
unsigned int id = partNodePointer[partId].node; // original graph node index
unsigned int part = partNodePointer[partId].part; // node's partition number
if(dist[id] != level)
return;
unsigned int thisPointer = nodePointer[id]; // index in edge list for node ID
unsigned int degree = edgeList[thisPointer]; // degree of the node
unsigned int numParts;
if(degree % Part_Size == 0) // Recalculating the number of partitions required for this node
numParts = degree / Part_Size ;
else
numParts = degree / Part_Size + 1;
unsigned int end;
unsigned int ofs = thisPointer + part + 1;
for(int i=0; i<Part_Size; i++) // Each thread walks its part's edges, strided by numParts
{
if(part + i*numParts >= degree) // Stop once the strided offset passes the node's degree
break;
end = ofs + i*numParts;
if(dist[edgeList[end]] == DIST_INFINITY)
{
dist[edgeList[end]] = level + 1;
*finished = false;
}
}
}
}
__global__ void clearLabel(bool *label, unsigned int size)
{
unsigned int id = blockDim.x * blockIdx.x + threadIdx.x;
if(id < size)
label[id] = false;
}
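// Editorial illustration (hedged; Part_Size comes from globals.hpp): a node of
// degree d owns numParts = ceil(d / Part_Size) virtual parts, and the thread
// holding part p visits edge offsets p, p + numParts, p + 2*numParts, ...
// e.g. d = 10, Part_Size = 4 -> numParts = 3; part 0 touches offsets {0, 3, 6, 9}.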
int main(int argc, char** argv)
{
ArgumentParser arguments(argc, argv, true, false);
Graph graph(arguments.input, false, arguments.printIntermediateResults);
graph.ReadGraph();
VirtualGraph vGraph(graph);
vGraph.MakeUGraph();
uint num_nodes = graph.num_nodes;
uint num_edges = graph.num_edges;
if(arguments.hasDeviceID)
hipSetDevice(arguments.deviceID);
hipFree(0);
unsigned int *dist;
dist = new unsigned int[num_nodes];
for(int i=0; i<num_nodes; i++)
{
dist[i] = DIST_INFINITY;
}
dist[arguments.sourceNode] = 0;
unsigned int *d_nodePointer;
unsigned int *d_edgeList;
unsigned int *d_dist;
PartPointer *d_partNodePointer;
bool finished;
bool *d_finished;
gpuErrorcheck(hipMalloc(&d_nodePointer, num_nodes * sizeof(unsigned int)));
gpuErrorcheck(hipMalloc(&d_edgeList, (num_edges + num_nodes) * sizeof(unsigned int)));
gpuErrorcheck(hipMalloc(&d_dist, num_nodes * sizeof(unsigned int)));
gpuErrorcheck(hipMalloc(&d_finished, sizeof(bool)));
gpuErrorcheck(hipMalloc(&d_partNodePointer, vGraph.numParts * sizeof(PartPointer)));
gpuErrorcheck(hipMemcpy(d_nodePointer, vGraph.nodePointer, num_nodes * sizeof(unsigned int), hipMemcpyHostToDevice));
gpuErrorcheck(hipMemcpy(d_edgeList, vGraph.edgeList, (num_edges + num_nodes) * sizeof(unsigned int), hipMemcpyHostToDevice));
gpuErrorcheck(hipMemcpy(d_dist, dist, num_nodes * sizeof(unsigned int), hipMemcpyHostToDevice));
gpuErrorcheck(hipMemcpy(d_partNodePointer, vGraph.partNodePointer, vGraph.numParts * sizeof(PartPointer), hipMemcpyHostToDevice));
Timer t;
t.Start();
int itr = 0;
int level = 0;
do
{
itr++;
finished = true;
gpuErrorcheck(hipMemcpy(d_finished, &finished, sizeof(bool), hipMemcpyHostToDevice));
if(itr % 2 == 1)
{
hipLaunchKernelGGL(( kernel), dim3(vGraph.numParts/512 + 1) , dim3(512) , 0, 0, vGraph.numParts,
d_nodePointer,
d_partNodePointer,
d_edgeList,
d_dist,
d_finished,
level);
}
else
{
hipLaunchKernelGGL(( kernel), dim3(vGraph.numParts/512 + 1) , dim3(512) , 0, 0, vGraph.numParts,
d_nodePointer,
d_partNodePointer,
d_edgeList,
d_dist,
d_finished,
level);
}
gpuErrorcheck( hipPeekAtLastError() );
gpuErrorcheck( hipDeviceSynchronize() );
gpuErrorcheck(hipMemcpy(&finished, d_finished, sizeof(bool), hipMemcpyDeviceToHost));
level++;
} while (!(finished));
cout << "Number of iterations = " << itr << endl;
float runtime = t.Finish();
cout << "Processing finished in " << runtime << " (ms).\n";
gpuErrorcheck(hipMemcpy(dist, d_dist, num_nodes*sizeof(unsigned int), hipMemcpyDeviceToHost));
if(num_nodes < 30)
{
utilities::PrintResults(dist, num_nodes);
}
else
{
utilities::PrintResults(dist, 30);
}
if(arguments.hasOutput)
utilities::SaveResults(arguments.output, dist, num_nodes);
gpuErrorcheck(hipFree(d_nodePointer));
gpuErrorcheck(hipFree(d_edgeList));
gpuErrorcheck(hipFree(d_dist));
gpuErrorcheck(hipFree(d_finished));
gpuErrorcheck(hipFree(d_partNodePointer));
}
|
510289562ccc3895ccc8791b087f62d82a219326.cu
|
#include "../shared/timer.hpp"
#include "../shared/tigr_utilities.hpp"
#include "../shared/graph.hpp"
#include "../shared/virtual_graph.hpp"
#include "../shared/globals.hpp"
#include "../shared/argument_parsing.hpp"
#include "../shared/gpu_error_check.cuh"
__global__ void kernel(unsigned int numParts,
unsigned int *nodePointer,
PartPointer *partNodePointer,
unsigned int *edgeList,
unsigned int *dist,
bool *finished,
int level)
{
unsigned int partId = blockDim.x * blockIdx.x + threadIdx.x;
if(partId < numParts)
{
unsigned int id = partNodePointer[partId].node; // original graph node index
unsigned int part = partNodePointer[partId].part; // node's partition number
if(dist[id] != level)
return;
unsigned int thisPointer = nodePointer[id]; // index in edge list for node ID
unsigned int degree = edgeList[thisPointer]; // degree of the node
unsigned int numParts;
if(degree % Part_Size == 0) // Recalculating the number of partitions required for this node
numParts = degree / Part_Size ;
else
numParts = degree / Part_Size + 1;
unsigned int end;
unsigned int ofs = thisPointer + part + 1;
for(int i=0; i<Part_Size; i++) // Each thread walks its part's edges, strided by numParts
{
if(part + i*numParts >= degree) // Stop once the strided offset passes the node's degree
break;
end = ofs + i*numParts;
if(dist[edgeList[end]] == DIST_INFINITY)
{
dist[edgeList[end]] = level + 1;
*finished = false;
}
}
}
}
__global__ void clearLabel(bool *label, unsigned int size)
{
unsigned int id = blockDim.x * blockIdx.x + threadIdx.x;
if(id < size)
label[id] = false;
}
int main(int argc, char** argv)
{
ArgumentParser arguments(argc, argv, true, false);
Graph graph(arguments.input, false, arguments.printIntermediateResults);
graph.ReadGraph();
VirtualGraph vGraph(graph);
vGraph.MakeUGraph();
uint num_nodes = graph.num_nodes;
uint num_edges = graph.num_edges;
if(arguments.hasDeviceID)
cudaSetDevice(arguments.deviceID);
cudaFree(0);
unsigned int *dist;
dist = new unsigned int[num_nodes];
for(int i=0; i<num_nodes; i++)
{
dist[i] = DIST_INFINITY;
}
dist[arguments.sourceNode] = 0;
unsigned int *d_nodePointer;
unsigned int *d_edgeList;
unsigned int *d_dist;
PartPointer *d_partNodePointer;
bool finished;
bool *d_finished;
gpuErrorcheck(cudaMalloc(&d_nodePointer, num_nodes * sizeof(unsigned int)));
gpuErrorcheck(cudaMalloc(&d_edgeList, (num_edges + num_nodes) * sizeof(unsigned int)));
gpuErrorcheck(cudaMalloc(&d_dist, num_nodes * sizeof(unsigned int)));
gpuErrorcheck(cudaMalloc(&d_finished, sizeof(bool)));
gpuErrorcheck(cudaMalloc(&d_partNodePointer, vGraph.numParts * sizeof(PartPointer)));
gpuErrorcheck(cudaMemcpy(d_nodePointer, vGraph.nodePointer, num_nodes * sizeof(unsigned int), cudaMemcpyHostToDevice));
gpuErrorcheck(cudaMemcpy(d_edgeList, vGraph.edgeList, (num_edges + num_nodes) * sizeof(unsigned int), cudaMemcpyHostToDevice));
gpuErrorcheck(cudaMemcpy(d_dist, dist, num_nodes * sizeof(unsigned int), cudaMemcpyHostToDevice));
gpuErrorcheck(cudaMemcpy(d_partNodePointer, vGraph.partNodePointer, vGraph.numParts * sizeof(PartPointer), cudaMemcpyHostToDevice));
Timer t;
t.Start();
int itr = 0;
int level = 0;
do
{
itr++;
finished = true;
gpuErrorcheck(cudaMemcpy(d_finished, &finished, sizeof(bool), cudaMemcpyHostToDevice));
if(itr % 2 == 1)
{
kernel<<< vGraph.numParts/512 + 1 , 512 >>>(vGraph.numParts,
d_nodePointer,
d_partNodePointer,
d_edgeList,
d_dist,
d_finished,
level);
}
else
{
kernel<<< vGraph.numParts/512 + 1 , 512 >>>(vGraph.numParts,
d_nodePointer,
d_partNodePointer,
d_edgeList,
d_dist,
d_finished,
level);
}
gpuErrorcheck( cudaPeekAtLastError() );
gpuErrorcheck( cudaDeviceSynchronize() );
gpuErrorcheck(cudaMemcpy(&finished, d_finished, sizeof(bool), cudaMemcpyDeviceToHost));
level++;
} while (!(finished));
cout << "Number of iterations = " << itr << endl;
float runtime = t.Finish();
cout << "Processing finished in " << runtime << " (ms).\n";
gpuErrorcheck(cudaMemcpy(dist, d_dist, num_nodes*sizeof(unsigned int), cudaMemcpyDeviceToHost));
if(num_nodes < 30)
{
utilities::PrintResults(dist, num_nodes);
}
else
{
utilities::PrintResults(dist, 30);
}
if(arguments.hasOutput)
utilities::SaveResults(arguments.output, dist, num_nodes);
gpuErrorcheck(cudaFree(d_nodePointer));
gpuErrorcheck(cudaFree(d_edgeList));
gpuErrorcheck(cudaFree(d_dist));
gpuErrorcheck(cudaFree(d_finished));
gpuErrorcheck(cudaFree(d_partNodePointer));
}
|
a208e719bf00fc6487d5dacba417961933e61b9c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <iostream>
#include <ostream>
#include <fstream>
#include <sys/time.h>
#include <time.h>
using namespace std;
#define CASENAME "LR1_o2"
#define BLOCKSIZEX 64
#define BLOCKSIZEY 1
#define BLOCKSIZEZ 1
#define BLOCKSIZELRX 64
#define BLOCKSIZELRY 1
#define BLOCKSIZELRZ 1
#define BLOCKSIZEINTERP 8
#define XDIM 251
#define YDIM 43
#define ZDIM 43
#define TMAX 20000
#define STARTF 0
#define OBSTR1 5.f
#define OBSTX1 50.5f
#define OBSTY1 20.5f
#define OBSTZ1 32.5f
#define OBSTR2 32.f
#define OBSTX2 319.5f
#define OBSTY2 511.5f
#define OBSTZ2 31.5f
#define LRFACTOR 0.5f
#define LRLEVEL 2
#define LRX0 30.25f //minimum x coord of LR
#define XLRDIM 128 //number of nodes in x
#define LRY0 10.25f
#define YLRDIM 42
#define LRZ0 -0.75f
#define ZLRDIM 86
#define ORDER 1 //order of accuracy of interpolation
//#define LRFACTOR 0.25f
//#define LRLEVEL 4
//#define LRX0 30.125f //minimum x coord of LR
//#define XLRDIM 256 //number of nodes in x
//#define LRY0 10.125f
//#define YLRDIM 84
//#define LRZ0 -0.875f
//#define ZLRDIM 172
//#define ORDER 2 //order of accuracy of interpolation
#define RE 20.f//2000.f//100.f;
#define UMAX 0.04f
#define SmagLES 0 //1,0
#define MODEL "MRT" //BGK,MRT,STREAM
#define REFINEMENT 1 //1,0
#define CS 0.02f
#define VELAV 1
#define START_VELAV 40000
#define START_VELFLUC 80000
inline __device__ int ImageFcnLR(float x, float y, float z)
{
int value = 0;
// if(((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1))<OBSTR1*OBSTR1)
// value = 10;
// else if(((x-OBSTX2)*(x-OBSTX2)+(y-OBSTY2)*(y-OBSTY2))<OBSTR2*OBSTR2)
// value = 10;
if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1)
{
value = 10;
// if(z < 1 || z > ZDIM-2)
// value = 1;
}
if(z < 0.5f)
value = 1;
if(z > ZDIM-1-0.5f)
value = 1;
return value;
}
inline __device__ int ImageFcn(int x, int y, int z)
{
int value = 0;
//Cylinder
// if(((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1))<OBSTR1*OBSTR1)
// value = 10;
// else if(((x-OBSTX2)*(x-OBSTX2)+(y-OBSTY2)*(y-OBSTY2))<OBSTR2*OBSTR2)
// value = 10;
// if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1)
// value = 10;
//Lid Driven Cavity
// if(y == 0 || y == YDIM-1 || z == 0 || z == ZDIM-1)
// value = 1;
// else if(x == XDIM-2 || y == 1 || y == YDIM-2 || z == 1 || z == ZDIM-2)
// return 1;
// else if(x == 0)
// return 1;
if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1)
value = 10;
// if(z == 1)
// value = 1;
// if(z == ZDIM-2)
// value = 1;
if(y == 0)
value = 1;//200;//22;
else if(y == YDIM-1)
value = 1;//100;
else if(x == 0)
value = 400;//26;
else if(x == XDIM-1)
//else if(x > 42)
value = 300;//25;
else if(z == 0)
value = 1;
else if(z == ZDIM-1)
value = 1;
return value;
}
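// Geometry flag legend (as consumed by the update kernels below):
//   0   : interior fluid node
//   1   : solid wall, halfway bounce-back
//   10  : obstacle surface; bounce-back plus force accumulation
//   100 : north outlet (extrapolation)   200 : south velocity inlet
//   300 : east outlet (extrapolation)    400 : west velocity inlet
//   25  : x-symmetry plane (top)         26  : x-symmetry plane (bottom)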
inline __device__ float PoisProf (float x){
float radius = (YDIM-1-1)*0.5f;
float result = -1.5f*(((1.0f-(x-0.5f)/radius))*((1.0f-(x-0.5f)/radius))-1.0f);
return (result);
}
inline __device__ float PoisProf3D (float x, float y){
x = x-0.5f;
y = y-0.5f;
float H = 41.f;
return 2.25f*16.f*UMAX*x*y*(H-x)*(H-y)/((H)*(H)*(H)*(H));
// float radius = (YDIM-1-1)*0.5f;
// float result = -1.0f*(((1.0f-(x-0.5f)/radius))*((1.0f-(x-0.5f)/radius))-1.0f);
// return (result);
}
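// Note: PoisProf3D is a separable parabolic approximation of the square-duct
// profile. With H = 41 the prefactor 2.25*16 normalizes the cross-sectional
// *average* velocity to UMAX (the centerline peak is 2.25*UMAX), since the
// mean of x*(H-x)/H^2 over [0,H] is 1/6 and 2.25*16/36 = 1.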
int
timeval_subtract (double *result, struct timeval *x, struct timeval *y)
{
struct timeval result0;
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
tv_usec is certainly positive. */
result0.tv_sec = x->tv_sec - y->tv_sec;
result0.tv_usec = x->tv_usec - y->tv_usec;
*result = ((double)result0.tv_usec)/1e6 + (double)result0.tv_sec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
// Clamping index helpers: dmin/dmax clamp indices into [0, b-1];
// the *_p variants wrap around periodically instead.
__device__ int dmin(int a, int b)
{
if (a<b) return a;
else return b-1;
}
__device__ int dmax(int a)
{
if (a>-1) return a;
else return 0;
}
__device__ int dmax(int a,int b)
{
if (a>b) return a;
else return b;
}
__device__ int dmin_p(int a, int b)
{
if (a<b) return a;
else return 0;
}
__device__ int dmax_p(int a, int b)
{
if (a>-1) return a;
else return b-1;
}
inline __device__ float trilinear_interp (float v000, float v001, float v010, float v011,
float v100, float v101, float v110, float v111, float x, float y, float z){
return v000*(1.f-x)*(1.f-y)*(1.f-z)+
v001*( x)*(1.f-y)*(1.f-z)+
v010*(1.f-x)*( y)*(1.f-z)+
v011*( x)*( y)*(1.f-z)+
v100*(1.f-x)*(1.f-y)*( z)+
v101*( x)*(1.f-y)*( z)+
v110*(1.f-x)*( y)*( z)+
v111*( x)*( y)*( z);
}
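// Usage sketch: interpolate a value at fractional offset (0.25, 0.5, 0.75)
// inside a unit cell whose corner values are c[0..7]:
//   float val = trilinear_interp(c[0],c[1],c[2],c[3],
//                                c[4],c[5],c[6],c[7],
//                                0.25f, 0.5f, 0.75f);
// Note the corner naming convention: v_{zyx}, with x varying fastest.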
inline __device__ int f_mem(int f_num, int x, int y, int z, size_t pitch, int zInner)
{
int index = (x+y*pitch+z*YDIM*pitch)+f_num*pitch*YDIM*(zInner);
index = dmax(index);
index = dmin(index,19*pitch*YDIM*(zInner));
return index;
}
inline __device__ int f_memLR(int f_num, int x, int y, int z, size_t pitch, int zInner)
{
int index = (x+y*pitch+z*YLRDIM*pitch)+f_num*pitch*YLRDIM*(zInner);
index = dmax(index);
index = dmin(index,19*pitch*YLRDIM*(zInner));
return index;
}
inline __device__ int f_mem_interp(int m_num, int x, int y, int z, int pitch, int zInner)
{
int index = (x+y*pitch+z*(YLRDIM*LRFACTOR+1)*pitch)+m_num*pitch*(YLRDIM*LRFACTOR+1)*(zInner);
index = dmax(index);
index = dmin(index,9*pitch*(YLRDIM*LRFACTOR+1)*(zInner));
return index;
}
inline __device__ int buff_mem_interp(int m_num, int x, int y, int pitch, int zInner)
{
int index = (x+y*pitch+m_num*(YLRDIM*LRFACTOR+1)*pitch);
index = dmax(index);
index = dmin(index,9*pitch*(YLRDIM*LRFACTOR+1));
return index;
}
inline __device__ int buff_mem(int f_num, int x, int y, size_t pitch)
{
int index = (x+y*pitch)+f_num*pitch*YDIM;
index = dmax(index);
index = dmin(index,19*pitch*YDIM);
return index;
}
inline __device__ int buff_memLR(int f_num, int x, int y, size_t pitch)
{
int index = (x+y*pitch)+f_num*pitch*YLRDIM;
index = dmax(index);
index = dmin(index,19*pitch*YLRDIM);
return index;
}
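// The f_mem*/buff_mem* helpers implement a structure-of-arrays layout: each
// of the 19 distributions occupies a contiguous block of XY(Z) planes,
//   addr = (x + y*pitch + z*YDIM*pitch) + f_num * pitch*YDIM*zInner
// so neighboring threads in x read consecutive elements of the same
// distribution (coalesced access). The dmax/dmin clamping only guards
// against out-of-range indices at domain edges; it does not wrap.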
inline __device__ void Moments(float* f, float* m)
{
m[0 ] = f[0]+f[1]+f[2]+f[3]+f[4]+f[5]+f[6]+f[7]+f[8]+f[9]+f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18];
m[1 ] = -30.f*f[0]+-11.f*f[1]+-11.f*f[2]+-11.f*f[3]+-11.f*f[4]+ 8.f*f[5]+ 8.f*f[6]+ 8.f*f[7]+ 8.f*f[8]+-11.f*f[9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18];
m[2 ] = 12.f*f[0]+ -4.f*f[1]+ -4.f*f[2]+ -4.f*f[3]+ -4.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ -4.f*f[9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18];
m[3 ] = f[1]-f[3]+f[5]-f[6]-f[7]+f[8]+f[10]-f[12]+f[15]-f[17];
m[4 ] = -4.f*f[1] + 4.f*f[3] + f[5]+ - f[6]+ - f[7]+ f[8] + f[10] + - f[12] + f[15] + - f[17] ;
m[5 ] = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
m[6 ] = -4.f*f[2] + 4.f*f[4]+ f[5]+ f[6]+ - f[7]+ - f[8] + f[11] + - f[13] + f[16] + - f[18];
m[7 ] = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
m[8 ] = + -4.f*f[9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18];
m[9 ] = 2.f*f[1]+ - f[2]+ 2.f*f[3]+ - f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ - f[9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[10] = -4.f*f[1]+ 2.f*f[2]+ -4.f*f[3]+ 2.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ 2.f*f[9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[11] = f[2] + f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ - f[9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ;
m[12] = -2.f*f[2] -2.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ 2.f*f[9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ;
m[13] = f[5]+ - f[6]+ f[7]+ - f[8] ;
m[14] = f[11] + - f[13] + - f[16] + f[18];
m[15] = f[10] + - f[12] + - f[15] + f[17] ;
m[16] = f[5]+ - f[6]+ - f[7]+ f[8] - f[10] + f[12] + - f[15] + f[17] ;
m[17] = - f[5]+ - f[6]+ f[7]+ f[8] + f[11] + - f[13] + f[16] + - f[18];
m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18];
}
void Moments_host(float* f, float* m)
{
m[0 ] = f[0]+f[1]+f[2]+f[3]+f[4]+f[5]+f[6]+f[7]+f[8]+f[9]+f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18];
m[1 ] = -30.f*f[0]+-11.f*f[1]+-11.f*f[2]+-11.f*f[3]+-11.f*f[4]+ 8.f*f[5]+ 8.f*f[6]+ 8.f*f[7]+ 8.f*f[8]+-11.f*f[9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18];
m[2 ] = 12.f*f[0]+ -4.f*f[1]+ -4.f*f[2]+ -4.f*f[3]+ -4.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ -4.f*f[9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18];
m[3 ] = f[1]-f[3]+f[5]-f[6]-f[7]+f[8]+f[10]-f[12]+f[15]-f[17];
m[4 ] = -4.f*f[1] + 4.f*f[3] + f[5]+ - f[6]+ - f[7]+ f[8] + f[10] + - f[12] + f[15] + - f[17] ;
m[5 ] = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
m[6 ] = -4.f*f[2] + 4.f*f[4]+ f[5]+ f[6]+ - f[7]+ - f[8] + f[11] + - f[13] + f[16] + - f[18];
m[7 ] = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
m[8 ] = + -4.f*f[9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18];
m[9 ] = 2.f*f[1]+ - f[2]+ 2.f*f[3]+ - f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ - f[9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[10] = -4.f*f[1]+ 2.f*f[2]+ -4.f*f[3]+ 2.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ 2.f*f[9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[11] = f[2] + f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ - f[9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ;
m[12] = -2.f*f[2] -2.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ 2.f*f[9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ;
m[13] = f[5]+ - f[6]+ f[7]+ - f[8] ;
m[14] = f[11] + - f[13] + - f[16] + f[18];
m[15] = f[10] + - f[12] + - f[15] + f[17] ;
m[16] = f[5]+ - f[6]+ - f[7]+ f[8] - f[10] + f[12] + - f[15] + f[17] ;
m[17] = - f[5]+ - f[6]+ f[7]+ f[8] + f[11] + - f[13] + f[16] + - f[18];
m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18];
}
void InvertMoments_host(float* f, float* m)
{
float u = m[3];
float v = m[5];
float w = m[7];
f[0 ]=(0.052631579f*m[0] +- 0.012531328f*(m[1])+ 0.047619048f*(m[2]));
f[1 ]=(0.052631579f*m[0]+ 0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ -0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[2 ]=(0.052631579f*m[0] + 0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[3 ]=(0.052631579f*m[0]+ -0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ 0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[4 ]=(0.052631579f*m[0] + -0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[5 ]=(0.052631579f*m[0]+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[6 ]=(0.052631579f*m[0]+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[7 ]=(0.052631579f*m[0]+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[8 ]=(0.052631579f*m[0]+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[9 ]=(0.052631579f*m[0] + 0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*m[0]+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*m[0] + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]+m[8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*m[0]+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*m[0] + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]-m[8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*m[0] + -0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*m[0]+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*m[0] + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]-m[8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*m[0]+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*m[0] + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]+m[8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
}
inline __device__ void mrt_meq(float* meq, float rho, float u, float v, float w)
{
meq[ 0] = rho;
meq[ 1] = -11.f*rho+19.f*(u*u+v*v+w*w);
meq[ 2] = 7.53968254f*(u*u+v*v+w*w);
meq[ 3] = u;
meq[ 4] = -0.666666667f*u;
meq[ 5] = v;
meq[ 6] = -0.666666667f*v;
meq[ 7] = w;
meq[ 8] = -0.666666667f*w;
meq[ 9] = 2.f*u*u-(v*v+w*w);
meq[11] = v*v-w*w;
meq[13] = u*v;
meq[14] = v*w;
meq[15] = u*w;
}
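// Note: mrt_meq leaves meq[10], meq[12], and meq[16..18] untouched; both
// callers (initialize/initializeLR) zero-initialize the moment array first,
// so those equilibria are implicitly zero.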
//outputs physical moments (rho,u,v,w,Pxx,Pww,Pxy,Pyz,Pxz) from f
inline __device__ void PhysicalMoments(float* mom, float* f)
{
mom[0] = f[0]+f[1]+f[2]+f[3]+f[4]+f[5]+f[6]+f[7]+f[8]+f[9]+f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18];
mom[1] = f[1]-f[3]+f[5]-f[6]-f[7]+f[8]+f[10]-f[12]+f[15]-f[17];
mom[2] = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
mom[3] = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
mom[4] = 2.f*f[1]+-f[2]+2.f*f[3]+-f[4]+f[5]+f[6]+f[7]+f[8]+-f[9]+f[10]+-2.f*f[11]+f[12]+-2.f*f[13]+-f[14]+f[15]+-2.f*f[16]+f[17]+-2.f*f[18];
mom[5] = f[2]+f[4]+f[5]+f[6]+f[7]+f[8]+-f[9]+-f[10]+-f[12]+-f[14]+-f[15]+-f[17];
mom[6] = f[5]+-f[6]+f[7]+-f[8];
mom[7] = f[11]+-f[13]+-f[16]+f[18];
mom[8] = f[10]+-f[12]+-f[15]+f[17];
}
inline __device__ void InvertMoments(float* f, float* m)
{
float u = m[3];
float v = m[5];
float w = m[7];
f[0 ]=(0.052631579f*m[0] +- 0.012531328f*(m[1])+ 0.047619048f*(m[2]));
f[1 ]=(0.052631579f*m[0]+ 0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ -0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[2 ]=(0.052631579f*m[0] + 0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[3 ]=(0.052631579f*m[0]+ -0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ 0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[4 ]=(0.052631579f*m[0] + -0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[5 ]=(0.052631579f*m[0]+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[6 ]=(0.052631579f*m[0]+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[7 ]=(0.052631579f*m[0]+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[8 ]=(0.052631579f*m[0]+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[9 ]=(0.052631579f*m[0] + 0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*m[0]+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*m[0] + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]+m[8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*m[0]+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*m[0] + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]-m[8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*m[0] + -0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*m[0]+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*m[0] + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]-m[8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*m[0]+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*m[0] + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]+m[8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
}
inline __device__ void InvertPhysicalMoments(float* f, float* mom, float SF)
{
float m[19]={0};
m[ 0] = mom[0];
m[ 1] = (-11.f*mom[0]+19.f*(mom[1]*mom[1]+mom[2]*mom[2]+mom[3]*mom[3]));
m[ 2] = 7.53968254f*(mom[1]*mom[1]+mom[2]*mom[2]+mom[3]*mom[3]);
m[ 3] = mom[1];
m[ 4] = -0.666666667f*mom[1];
m[ 5] = mom[2];
m[ 6] = -0.666666667f*mom[2];
m[ 7] = mom[3];
m[ 8] = -0.666666667f*mom[3];
m[ 9] = mom[4]*SF+(1.f-SF)*(2.f*mom[1]*mom[1]-(mom[2]*mom[2]+mom[3]*mom[3]));
m[11] = mom[5]*SF+(1.f-SF)*(mom[2]*mom[2]-mom[3]*mom[3]);
m[13] = mom[6]*SF+(1.f-SF)*mom[1]*mom[2];
m[14] = mom[7]*SF+(1.f-SF)*mom[2]*mom[3];
m[15] = mom[8]*SF+(1.f-SF)*mom[1]*mom[3];
// InvertMoments(f,m);
float u = m[3];
float v = m[5];
float w = m[7];
f[0 ]=(0.052631579f*m[0] +- 0.012531328f*(m[1])+ 0.047619048f*(m[2]));
f[1 ]=(0.052631579f*m[0]+ 0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ -0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[2 ]=(0.052631579f*m[0] + 0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[3 ]=(0.052631579f*m[0]+ -0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ 0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[4 ]=(0.052631579f*m[0] + -0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[5 ]=(0.052631579f*m[0]+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[6 ]=(0.052631579f*m[0]+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[7 ]=(0.052631579f*m[0]+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[8 ]=(0.052631579f*m[0]+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[9 ]=(0.052631579f*m[0] + 0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*m[0]+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*m[0] + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]+m[8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*m[0]+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*m[0] + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]-m[8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*m[0] + -0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*m[0]+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*m[0] + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]-m[8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*m[0]+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*m[0] + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]+m[8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
}
//outputs strain rate tensor (Sxx,Syy,Szz,Sxy,Syz,Sxz) from inputs (m0,m3,m5,m7,m9,m11,m13,m14,m15)
inline __device__ void StrainRate(float* S, float* m_strain, float omega)
{
float m1 = (-11.f*m_strain[0]+19.f*(m_strain[1]*m_strain[1]+m_strain[2]*m_strain[2]+m_strain[3]*m_strain[3]));
float m9 = m_strain[4];
float m11= m_strain[5];
float m13= m_strain[6];
float m14= m_strain[7];
float m15= m_strain[8];
S[0] = -0.026315789f*( m1+19.f*omega* m9);
S[1] = -0.013157895f*(2.f*m1-19.f*omega*(m9-3.f*m11));
S[2] = -0.013157895f*(2.f*m1-19.f*omega*(m9+3.f*m11));
S[3] = -1.5f*omega*m13;
S[4] = -1.5f*omega*m14;
S[5] = -1.5f*omega*m15;
}
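// Hedged sketch: given S from StrainRate, a Smagorinsky eddy viscosity and
// the corresponding adjusted relaxation rate could be computed as follows
// (treating CS as Cs^2 with a filter width of one lattice unit is an
// assumption for illustration):
//   float Smag  = sqrtf(2.f*(S[0]*S[0]+S[1]*S[1]+S[2]*S[2])
//                      +4.f*(S[3]*S[3]+S[4]*S[4]+S[5]*S[5])); // |S|
//   float nu_t  = CS*Smag;               // eddy viscosity
//   float tau_e = 1.f/omega + 3.f*nu_t;  // effective relaxation time
//   float omega_e = 1.f/tau_e;
// mrt_collide below instead folds the correction into omega via the
// Q-tensor of non-equilibrium moments, avoiding an explicit S.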
inline __device__ void mrt_collide(float* f, float omega)
{
float m[19];
//float u,v,w;
m[3] = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
m[5] = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
m[7] = f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
m[0] = f[ 0]+f[ 1]+f[ 2]+f[ 3]+f[ 4]+f[ 5]+f[ 6]+f[ 7]+f[ 8]+f[ 9]+
f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18];
m[ 1] = 19.f*(-f[ 0]+ f[ 5]+f[ 6]+f[ 7]+f[ 8]+f[10]+f[11]+f[12]+f[13]+f[15]+f[16]+f[17]+f[18] -(m[3]*m[3]+m[5]*m[5]+m[7]*m[7]));//+8.f*(f[ 5]+f[ 6]+f[ 7]+f[ 8]+f[10]+f[11]+f[12]+f[13]+f[15]+f[16]+f[17]+f[18]);
m[ 2] = 12.f*f[ 0]+ -4.f*f[ 1]+ -4.f*f[ 2]+ -4.f*f[ 3]+ -4.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18] -7.53968254f*(m[3]*m[3]+m[5]*m[5]+m[7]*m[7]);
m[ 4] = 1.666666667f*(-3.f*f[1]+3.f*f[ 3]+m[3]);
m[ 6] = 1.666666667f*(-3.f*f[2]+3.f*f[ 4]+m[5]);
m[ 8] = 1.666666667f*(-3.f*f[9]+3.f*f[14]+m[7]);
m[ 9] = 2.f*f[ 1]+ - f[ 2]+ 2.f*f[ 3]+ - f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+- f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+-2.f*f[13]+- f[14]+ f[15]+ -2.f*f[16]+ f[17]+-2.f*f[18] -(2.f*m[3]*m[3]-(m[5]*m[5]+m[7]*m[7]));
m[10] =-4.f*f[ 1]+ 2.f*f[ 2]+ -4.f*f[ 3]+ 2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+-2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+-2.f*f[18];
m[11] = f[ 2] + f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+- f[ 9]+-f[10] +-f[12] +- f[14]+-f[15] +-f[17] -(m[5]*m[5]-m[7]*m[7]);
m[12] = -2.f*f[ 2] -2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+-f[10] +-f[12] + 2.f*f[14]+-f[15] +-f[17] ;
m[13] = f[ 5]+-f[ 6]+ f[ 7]+-f[ 8] -m[3]*m[5];
m[14] = f[11] +- f[13] + - f[16] + f[18] -m[5]*m[7];
m[15] = f[10] + - f[12] +-f[15] + f[17] -m[3]*m[7];
m[16] = f[ 5]+-f[ 6]+-f[ 7]+ f[ 8] -f[10] + f[12] +-f[15] + f[17] ;
m[17] = -f[ 5]+-f[ 6]+ f[ 7]+ f[ 8] + f[11] +- f[13] + f[16] +- f[18];
m[18] = f[10]+- f[11]+ f[12]+- f[13] +-f[15]+ f[16]+-f[17]+ f[18];
if(SmagLES == 1)
{
float Pxx = 0.33333333f*(m[1]+2.f*m[0])+m[9];
float Pyy = Pxx+0.5f*(m[11]-m[9]);//0.3333333f*(m[1]+2.f*m[0]+0.5f*(3.f*m[11]-m[9]));
float Pzz = Pyy-m[11];
float Q11 = 0.33333333f*m[0]+m[3]*m[3]-Pxx;
float Q22 = 0.33333333f*m[0]+m[5]*m[5]-Pyy;
float Q33 = 0.33333333f*m[0]+m[7]*m[7]-Pzz;
float Q12 = 0.33333333f*m[0]+m[3]*m[5]-m[13];
float Q23 = 0.33333333f*m[0]+m[5]*m[7]-m[14];
float Q13 = 0.33333333f*m[0]+m[3]*m[7]-m[15];
float Q = sqrtf(Q11*Q11+Q22*Q22+Q33*Q33+2.f*Q12*Q12+2.f*Q23*Q23+2.f*Q13*Q13);
float tau0 = 1.f/omega;
float tau = 0.5f*tau0+0.5f*sqrtf(tau0*tau0+18.f*CS*sqrtf(2.f)*Q);
omega = 1.f/tau;
}
f[ 0] -=- 0.012531328f*(m[ 1])+ 0.047619048f*(m[ 2]);
f[ 1] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ -0.1f*(m[ 4]) + 0.055555556f*((m[ 9])*omega-m[10]);
f[ 2] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 6]) +-0.027777778f*((m[ 9])*omega-m[10])+ 0.083333333f*((m[11])*omega-m[12]);
f[ 3] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ 0.1f*(m[ 4]) + 0.055555556f*((m[ 9])*omega-m[10]);
f[ 4] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 6]) +-0.027777778f*((m[ 9])*omega-m[10])+ 0.083333333f*((m[11])*omega-m[12]);
f[ 5] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ omega*(0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13])));
f[ 6] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ omega*(0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13])));
f[ 7] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ omega*(0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13])));
f[ 8] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ omega*(0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13])));
f[ 9] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 8])+-0.027777778f*((m[ 9])*omega-m[10])+-0.083333333f*((m[11])*omega-m[12]);
f[10] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ omega*(0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15])));
f[11] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]+m[ 8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+omega*(-0.055555556f*(m[ 9]) +( 0.25f*(m[14])));
f[12] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ omega*(0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15])));
f[13] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]-m[ 8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+omega*(-0.055555556f*(m[ 9]) +(-0.25f*(m[14])));
f[14] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 8])+-0.027777778f*((m[ 9])*omega-m[10])+-0.083333333f*((m[11])*omega-m[12]);
f[15] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ omega*(0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15])));
f[16] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]-m[ 8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+omega*(-0.055555556f*(m[ 9]) +(-0.25f*(m[14])));
f[17] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ omega*(0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15])));
f[18] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]+m[ 8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+omega*(-0.055555556f*(m[ 9]) +( 0.25f*(m[14])));
}
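// Standard D3Q19 relation (reference, not project-specific code): omega
// corresponds to a lattice kinematic viscosity nu = (1/omega - 0.5)/3, so a
// host setting targeting Reynolds number RE would typically look like
//   float nu    = UMAX*(OBSTR1*2.f)/RE;  // characteristic length assumed
//   float omega = 1.f/(3.f*nu + 0.5f);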
inline __device__ void North_Extrap(float* f, float rho)
{
float m[19];
//rho = 1.0f;
float u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
float v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
float w = f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
m[ 1] = -30.f*f[ 0]+-11.f*f[ 1]+-11.f*f[ 2]+-11.f*f[ 3]+-11.f*f[ 4]+ 8.f*f[ 5]+ 8.f*f[ 6]+ 8.f*f[ 7]+ 8.f*f[ 8]+-11.f*f[ 9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18];
m[ 2] = 12.f*f[ 0]+ -4.f*f[ 1]+ -4.f*f[ 2]+ -4.f*f[ 3]+ -4.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18];
m[ 4] = -4.f*f[ 1] + 4.f*f[ 3] + f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] + f[10] + - f[12] + f[15] + - f[17] ;
m[ 6] = -4.f*f[ 2] + 4.f*f[ 4]+ f[ 5]+ f[ 6]+ - f[ 7]+ - f[ 8] + f[11] + - f[13] + f[16] + - f[18];
m[ 8] = + -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18];
m[ 9] = 2.f*f[ 1]+ - f[ 2]+ 2.f*f[ 3]+ - f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[10] = -4.f*f[ 1]+ 2.f*f[ 2]+ -4.f*f[ 3]+ 2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[11] = f[ 2] + f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ;
m[12] = -2.f*f[ 2] -2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ;
m[13] = f[ 5]+ - f[ 6]+ f[ 7]+ - f[ 8] ;
m[14] = f[11] + - f[13] + - f[16] + f[18];
m[15] = f[10] + - f[12] + - f[15] + f[17] ;
m[16] = f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] - f[10] + f[12] + - f[15] + f[17] ;
m[17] = - f[ 5]+ - f[ 6]+ f[ 7]+ f[ 8] + f[11] + - f[13] + f[16] + - f[18];
m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18];
f[ 0] =(0.052631579f*rho +- 0.012531328f*(m[ 1])+ 0.047619048f*(m[ 2]));
f[ 1] =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ -0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10]));
f[ 2] =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[ 3] =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ 0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10]));
f[ 4] =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[ 5] =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[ 6] =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[ 7] =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[ 8] =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[ 9] =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]+m[ 8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]-m[ 8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]-m[ 8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]+m[ 8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))));
}
inline __device__ void South_Extrap(float* f, float v)
{
float m[19];
float u = 0.f;//f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
float w = 0.f;//f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
float rho = f[0]+f[1]+f[2]+f[3]+f[4]+f[5]+f[6]+f[7]+f[8]+f[9]+f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18];
m[ 1] = -30.f*f[ 0]+-11.f*f[ 1]+-11.f*f[ 2]+-11.f*f[ 3]+-11.f*f[ 4]+ 8.f*f[ 5]+ 8.f*f[ 6]+ 8.f*f[ 7]+ 8.f*f[ 8]+-11.f*f[ 9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18];
m[ 2] = 12.f*f[ 0]+ -4.f*f[ 1]+ -4.f*f[ 2]+ -4.f*f[ 3]+ -4.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18];
m[ 4] = -4.f*f[ 1] + 4.f*f[ 3] + f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] + f[10] + - f[12] + f[15] + - f[17] ;
m[ 6] = -4.f*f[ 2] + 4.f*f[ 4]+ f[ 5]+ f[ 6]+ - f[ 7]+ - f[ 8] + f[11] + - f[13] + f[16] + - f[18];
m[ 8] = + -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18];
m[ 9] = 2.f*f[ 1]+ - f[ 2]+ 2.f*f[ 3]+ - f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[10] = -4.f*f[ 1]+ 2.f*f[ 2]+ -4.f*f[ 3]+ 2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[11] = f[ 2] + f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ;
m[12] = -2.f*f[ 2] -2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ;
m[13] = f[ 5]+ - f[ 6]+ f[ 7]+ - f[ 8] ;
m[14] = f[11] + - f[13] + - f[16] + f[18];
m[15] = f[10] + - f[12] + - f[15] + f[17] ;
m[16] = f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] - f[10] + f[12] + - f[15] + f[17] ;
m[17] = - f[ 5]+ - f[ 6]+ f[ 7]+ f[ 8] + f[11] + - f[13] + f[16] + - f[18];
m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18];
f[ 0] =(0.052631579f*rho +- 0.012531328f*(m[ 1])+ 0.047619048f*(m[ 2]));
f[ 1] =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ -0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10]));
f[ 2] =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[ 3] =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ 0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10]));
f[ 4] =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[ 5] =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[ 6] =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[ 7] =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[ 8] =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[ 9] =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]+m[ 8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]-m[ 8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]-m[ 8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]+m[ 8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))));
}
inline __device__ void East_Extrap(float* f, float rho)
{
float m[19];
//rho = 0.0f;
float u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
float v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
float w = f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
m[ 1] = -30.f*f[ 0]+-11.f*f[ 1]+-11.f*f[ 2]+-11.f*f[ 3]+-11.f*f[ 4]+ 8.f*f[ 5]+ 8.f*f[ 6]+ 8.f*f[ 7]+ 8.f*f[ 8]+-11.f*f[ 9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18];
m[ 2] = 12.f*f[ 0]+ -4.f*f[ 1]+ -4.f*f[ 2]+ -4.f*f[ 3]+ -4.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18];
m[ 4] = -4.f*f[ 1] + 4.f*f[ 3] + f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] + f[10] + - f[12] + f[15] + - f[17] ;
m[ 6] = -4.f*f[ 2] + 4.f*f[ 4]+ f[ 5]+ f[ 6]+ - f[ 7]+ - f[ 8] + f[11] + - f[13] + f[16] + - f[18];
m[ 8] = + -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18];
m[ 9] = 2.f*f[ 1]+ - f[ 2]+ 2.f*f[ 3]+ - f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[10] = -4.f*f[ 1]+ 2.f*f[ 2]+ -4.f*f[ 3]+ 2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[11] = f[ 2] + f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ;
m[12] = -2.f*f[ 2] -2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ;
m[13] = f[ 5]+ - f[ 6]+ f[ 7]+ - f[ 8] ;
m[14] = f[11] + - f[13] + - f[16] + f[18];
m[15] = f[10] + - f[12] + - f[15] + f[17] ;
m[16] = f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] - f[10] + f[12] + - f[15] + f[17] ;
m[17] = - f[ 5]+ - f[ 6]+ f[ 7]+ f[ 8] + f[11] + - f[13] + f[16] + - f[18];
m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18];
f[ 0] =(0.052631579f*rho +- 0.012531328f*(m[ 1])+ 0.047619048f*(m[ 2]));
f[ 1] =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ -0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10]));
f[ 2] =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[ 3] =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ 0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10]));
f[ 4] =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[ 5] =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[ 6] =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[ 7] =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[ 8] =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[ 9] =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]+m[ 8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]-m[ 8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]-m[ 8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]+m[ 8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))));
}
inline __device__ void West_Extrap(float* f, float u)
{
float m[19];
float v = 0.f;//f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
float w = 0.f;//f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
float rho = f[0]+f[1]+f[2]+f[3]+f[4]+f[5]+f[6]+f[7]+f[8]+f[9]+f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18];
m[ 1] = -30.f*f[ 0]+-11.f*f[ 1]+-11.f*f[ 2]+-11.f*f[ 3]+-11.f*f[ 4]+ 8.f*f[ 5]+ 8.f*f[ 6]+ 8.f*f[ 7]+ 8.f*f[ 8]+-11.f*f[ 9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18];
m[ 2] = 12.f*f[ 0]+ -4.f*f[ 1]+ -4.f*f[ 2]+ -4.f*f[ 3]+ -4.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18];
m[ 4] = -4.f*f[ 1] + 4.f*f[ 3] + f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] + f[10] + - f[12] + f[15] + - f[17] ;
m[ 6] = -4.f*f[ 2] + 4.f*f[ 4]+ f[ 5]+ f[ 6]+ - f[ 7]+ - f[ 8] + f[11] + - f[13] + f[16] + - f[18];
m[ 8] = + -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18];
m[ 9] = 2.f*f[ 1]+ - f[ 2]+ 2.f*f[ 3]+ - f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[10] = -4.f*f[ 1]+ 2.f*f[ 2]+ -4.f*f[ 3]+ 2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[11] = f[ 2] + f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ;
m[12] = -2.f*f[ 2] -2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ;
m[13] = f[ 5]+ - f[ 6]+ f[ 7]+ - f[ 8] ;
m[14] = f[11] + - f[13] + - f[16] + f[18];
m[15] = f[10] + - f[12] + - f[15] + f[17] ;
m[16] = f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] - f[10] + f[12] + - f[15] + f[17] ;
m[17] = - f[ 5]+ - f[ 6]+ f[ 7]+ f[ 8] + f[11] + - f[13] + f[16] + - f[18];
m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18];
f[ 0] =(0.052631579f*rho +- 0.012531328f*(m[ 1])+ 0.047619048f*(m[ 2]));
f[ 1] =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ -0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10]));
f[ 2] =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[ 3] =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ 0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10]));
f[ 4] =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[ 5] =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[ 6] =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[ 7] =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[ 8] =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[ 9] =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]+m[ 8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]-m[ 8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]-m[ 8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]+m[ 8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))));
}
__device__ void xsymmetry_bot(float* f, int y, int z)
{
if(y == 0 && z == 0){
f[ 2] = f[ 4];
f[13]=f[18];
f[11]=f[18];
f[16]=f[18];
f[ 6] =f[ 7];
f[ 9] =f[14];
f[12]=f[17];
}
else if(y == 0 && z == ZDIM-1){
f[ 4] = f[ 2];
f[11]=f[13];
f[18]=f[13];
f[16]=f[13];
f[ 6] =f[ 7];
f[14]=f[ 9];
f[17]=f[12];
}
else if(y == YDIM-1 && z == 0){
f[ 4] = f[ 2];
f[11]=f[16];
f[18]=f[16];
f[13]=f[16];
f[ 7] =f[ 6];
f[ 9] =f[14];
f[12]=f[17];
}
else if(y == YDIM-1 && z == ZDIM-1){
f[ 4] = f[ 2];
f[16]=f[11];
f[18]=f[11];
f[13]=f[11];
f[ 7] =f[ 6];
f[14]=f[ 9];
f[17]=f[12];
}
else{
if(y == 0){
f[ 2] = f[ 4];
f[11]=f[13];
f[16]=f[18];
f[ 8] = f[ 5];
}
else if(y == YDIM-1){
f[ 4]=f[ 2] ;
f[13]=f[11];
f[18]=f[16];
f[ 5]=f[ 8] ;
}
}
f[ 1] = f[ 3] ;
f[ 5] = f[ 6] ;
f[ 8] = f[ 7] ;
f[10]= f[12];
f[15]= f[17];
}
__device__ void xsymmetry_top(float* f, int y, int z)
{
if(y == 0 && z == 0){
f[ 2] = f[ 4];
f[13] = f[18];
f[11] = f[18];
f[16] = f[18];
f[ 5] = f[ 8];
f[ 9] = f[14];
f[10] = f[15];
}
else if(y == 0 && z == ZDIM-1){
f[ 2] = f[ 4];
f[11] = f[13];
f[18] = f[13];
f[16] = f[13];
f[ 5] = f[ 8];
f[14] = f[ 9];
f[15] = f[10];
}
else if(y == YDIM-1 && z == 0){
f[ 4] = f[ 2];
f[18] = f[16];
f[11] = f[16];
f[13] = f[16];
f[ 8] = f[ 5];
f[ 9] = f[14];
f[10] = f[15];
}
else if(y == YDIM-1 && z == ZDIM-1){
f[ 4] = f[ 2];
f[13] = f[11];
f[16] = f[11];
f[18] = f[11];
f[ 8] = f[ 5];
f[14] = f[ 9];
f[15] = f[10];
}
else{
if(y == 0){
f[ 2] = f[ 4];
f[11] = f[13];
f[16] = f[18];
f[ 5] = f[ 8];
}
else if(y == YDIM-1){
f[ 4] = f[ 2];
f[13] = f[11];
f[18] = f[16];
f[ 8] = f[ 5];
}
}
f[ 3] = f[ 1] ;
f[ 6] = f[ 5] ;
f[ 7] = f[ 8] ;
f[12]= f[10];
f[17]= f[15];
}
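// xsymmetry_bot/xsymmetry_top impose free-slip symmetry planes on the x-min
// and x-max faces: each population entering through the face is rebuilt from
// its mirror image with the x velocity component reversed, and the special
// cases above fold in the y/z reflections at edges and corners.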
inline __device__ void vel_av(float* f, float& uAv, float& vAv, int t)
{
float u,v;//,w;
u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
uAv = (uAv*(t-START_VELAV)+u)/((t-START_VELAV)+1);
vAv = (vAv*(t-START_VELAV)+v)/((t-START_VELAV)+1);
}
inline __device__ void vel_avLR(float* f, float& uAv, float& vAv, float t)
{
float u,v;//,w;
u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
uAv = (uAv*(t-START_VELAV)+u*LRFACTOR)/((t-START_VELAV)+LRFACTOR);
vAv = (vAv*(t-START_VELAV)+v*LRFACTOR)/((t-START_VELAV)+LRFACTOR);
}
inline __device__ void vel_fluc(float* f, float& uAv,
float& vAv, float& ufluc, float& vfluc, int t)
{
float u,v;//,w;
u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
u = (u-uAv)*(u-uAv);
v = (v-vAv)*(v-vAv);
ufluc = (ufluc*(t-START_VELFLUC)+u)/((t-START_VELFLUC)+1);
vfluc = (vfluc*(t-START_VELFLUC)+v)/((t-START_VELFLUC)+1);
}
inline __device__ void vel_flucLR(float* f, float& uAv,
float& vAv, float& ufluc, float& vfluc, float t)
{
float u,v;//,w;
u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
u = (u-uAv)*(u-uAv);
v = (v-vAv)*(v-vAv);
ufluc = (ufluc*(t-START_VELFLUC)+u*LRFACTOR)/((t-START_VELFLUC)+LRFACTOR);
vfluc = (vfluc*(t-START_VELFLUC)+v*LRFACTOR)/((t-START_VELFLUC)+LRFACTOR);
}
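// Both vel_av* and vel_fluc* maintain running means incrementally:
//   avg_{n+1} = (avg_n * n + x_{n+1}) / (n + 1)
// with n = t - START_VELAV (or t - START_VELFLUC). The *LR variants weight
// each fine-grid sample by LRFACTOR so that LRLEVEL subcycle steps
// contribute the same as one coarse step.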
__global__ void initialize(float *fout, size_t pitch, int zInner, int GPU_N)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
float xcoord = x;
float ycoord = y;
float zcoord = z+1+GPU_N*ZDIM;
int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
float f[19] = {0};
float m[19] = {0};
int im = ImageFcn(xcoord,ycoord,zcoord);
float u,v,w,rho;
rho = 1.f;
u = 0.f;
v = 0.f;
w = 0.f;
if(im == 10 || im == 1){
u = 0.0f;
v = 0.0f;
w = 0.0f;
}
mrt_meq(m,rho,u,v,w);
InvertMoments(f,m);
for(int i = 0; i<19; i++)
fout[j+i *pitch*YDIM*zInner]=f[ i];
}
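// Hedged host-side launch sketch (the grid shape is an assumption based on
// the BLOCKSIZE* macros; f_d and pitch_elements are hypothetical names --
// the actual driver code appears later in the file):
//   dim3 threads(BLOCKSIZEX, BLOCKSIZEY, BLOCKSIZEZ);
//   dim3 grid(XDIM/BLOCKSIZEX, YDIM/BLOCKSIZEY, zInner/BLOCKSIZEZ);
//   initialize<<<grid, threads>>>(f_d, pitch_elements, zInner, gpu_id);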
__global__ void initializeLR(float *fout, size_t pitch, int zInner, int GPU_N)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
float xcoord = x;
float ycoord = y;
float zcoord = z+1+GPU_N*(zInner+2);
xcoord = LRX0+x*LRFACTOR;
ycoord = LRY0+y*LRFACTOR;
zcoord = LRZ0+z*LRFACTOR;
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
float f[19] = {0};
float m[19] = {0};
int im = ImageFcnLR(xcoord,ycoord,zcoord);
float u,v,w,rho;
rho = 1.f;
u = 0.0f;
v = 0.0f;
w = 0.0f;
if(im == 10 || im == 1){
u = 0.0f;
v = 0.0f;
w = 0.0f;
}
mrt_meq(m,rho,u,v,w);
InvertMoments(f,m);
for(int i = 0; i<19; i++)
fout[j+i *pitch*YLRDIM*zInner]=f[ i];
}
__global__ void update_top(float* hB, float* hA, float* fA, float* temp,
float omega, size_t pitch, int GPU, int zInner, float* FX, float* FY, float* FZ, int t, int flag_F, float* h_interp, size_t pitch_interp)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int j = x+y*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,(GPU+1)*(zInner+2)-1);
float f[19];
__shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
__shared__ int check[1];
check[0] = 0;
__syncthreads();
f[0 ]= hA [j];
f[1 ]= hA [buff_mem(1 ,x-1,y ,pitch)];
f[3 ]= hA [buff_mem(3 ,x+1,y ,pitch)];
f[2 ]= hA [buff_mem(2 ,x ,y-1,pitch)];
f[5 ]= hA [buff_mem(5 ,x-1,y-1,pitch)];
f[6 ]= hA [buff_mem(6 ,x+1,y-1,pitch)];
f[4 ]= hA [buff_mem(4 ,x ,y+1,pitch)];
f[7 ]= hA [buff_mem(7 ,x+1,y+1,pitch)];
f[8 ]= hA [buff_mem(8 ,x-1,y+1,pitch)];
f[9 ]= fA [f_mem (9 ,x ,y ,zInner-1,pitch, zInner)];
f[10]= fA [f_mem (10,x-1,y ,zInner-1,pitch, zInner)];
f[11]= fA [f_mem (11,x ,y-1,zInner-1,pitch, zInner)];
f[12]= fA [f_mem (12,x+1,y ,zInner-1,pitch, zInner)];
f[13]= fA [f_mem (13,x ,y+1,zInner-1,pitch, zInner)];
f[14]= temp[buff_mem(14,x ,y ,pitch)];
f[15]= temp[buff_mem(15,x-1,y ,pitch)];
f[16]= temp[buff_mem(16,x ,y-1,pitch)];
f[17]= temp[buff_mem(17,x+1,y ,pitch)];
f[18]= temp[buff_mem(18,x ,y+1,pitch)];
if(im == 1 || im == 10){ // bounce-back
if(im == 10 && flag_F == 1){
check[0] = 1;
sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6];
sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17];
sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6];
sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18];
sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13];
sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
hB[buff_mem(0 ,x,y,pitch)] = f[0 ];
hB[buff_mem(1 ,x,y,pitch)] = f[3 ];
hB[buff_mem(2 ,x,y,pitch)] = f[4 ];
hB[buff_mem(3 ,x,y,pitch)] = f[1 ];
hB[buff_mem(4 ,x,y,pitch)] = f[2 ];
hB[buff_mem(5 ,x,y,pitch)] = f[7 ];
hB[buff_mem(6 ,x,y,pitch)] = f[8 ];
hB[buff_mem(7 ,x,y,pitch)] = f[5 ];
hB[buff_mem(8 ,x,y,pitch)] = f[6 ];
hB[buff_mem(9 ,x,y,pitch)] = f[14];
hB[buff_mem(10,x,y,pitch)] = f[17];
hB[buff_mem(11,x,y,pitch)] = f[18];
hB[buff_mem(12,x,y,pitch)] = f[15];
hB[buff_mem(13,x,y,pitch)] = f[16];
hB[buff_mem(14,x,y,pitch)] = f[9 ];
hB[buff_mem(15,x,y,pitch)] = f[12];
hB[buff_mem(16,x,y,pitch)] = f[13];
hB[buff_mem(17,x,y,pitch)] = f[10];
hB[buff_mem(18,x,y,pitch)] = f[11];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
if(im == 100)//north outlet
{
for(int i = 0; i<19; i++)
f[i ]= hA[buff_mem(i ,x,y-1,pitch)];
North_Extrap(f,1.0f);
}
if(im == 200)//south inlet
{
for(int i = 0; i<19; i++)
f[i ]= hA[buff_mem(i ,x,y+1,pitch)];
//South_Extrap(f,UMAX);
float u_in = PoisProf3D(x,(GPU+1)*(zInner+2)-1);
South_Extrap(f,u_in);
}
if(im == 300)//east outlet
{
for(int i = 0; i<19; i++)
f[i ]= hA[buff_mem(i ,x-1,y,pitch)];
East_Extrap(f,1.0f);
}
if(im == 400)//west inlet
{
for(int i = 0; i<19; i++)
f[i ]= hA[buff_mem(i ,x+1,y,pitch)];
float u_in = PoisProf3D(y,(GPU+1)*(zInner+2)-1);
West_Extrap(f,u_in);
}
if(im == 25)
xsymmetry_top(f,y,(GPU+1)*(zInner+2)-1);
if(im == 26)
xsymmetry_bot(f,y,(GPU+1)*(zInner+2)-1);
mrt_collide(f,omega);
for(int i = 0; i<19; i++)
hB[buff_mem(i ,x,y,pitch)] = f[i ];
}
if(REFINEMENT == 1){
if(x>=int(LRX0)&&x<=int(LRX0+XLRDIM*LRFACTOR)&&y>=int(LRY0)&&y<=int(LRY0+YLRDIM*LRFACTOR))
{
// if(x>int(LRX0+2)&&x<int(LRX0+XLRDIM*LRFACTOR-1)&&y>int(LRY0+2)&&y<int(LRY0+YLRDIM*LRFACTOR-1))
// {
// //do nothing
// }
// else{
// //float rho,u,v,w,m9,m11,m13,m14,m15;
float mom[9];
PhysicalMoments(mom,f);
for(int i = 0; i<9; i++)
h_interp[buff_mem_interp(i,x-int(LRX0),y-int(LRY0),pitch_interp,zInner)]=mom[i];
// }
}
}
syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[t-STARTF],sumX[0]);
atomicAdd(&FY[t-STARTF],sumY[0]);
atomicAdd(&FZ[t-STARTF],sumZ[0]);
}
}
}
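//update_bot mirrors update_top for the bottom halo plane: gA/gB are the
//current/next halo distributions and temp holds the neighbor GPU's copy.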
__global__ void update_bot(float* gB, float* gA, float* fA, float* temp,
float omega, size_t pitch, int GPU, int zInner, float* FX, float* FY, float* FZ, int t, int flag_F, float* g_interp, size_t pitch_interp)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int j = x+y*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,GPU*(zInner+2));
float f[19];
__shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
__shared__ int check[1];
check[0] = 0;
syncthreads();
f[0 ]= gA [j];
f[1 ]= gA [buff_mem(1 ,x-1,y ,pitch)];
f[3 ]= gA [buff_mem(3 ,x+1,y ,pitch)];
f[2 ]= gA [buff_mem(2 ,x ,y-1,pitch)];
f[5 ]= gA [buff_mem(5 ,x-1,y-1,pitch)];
f[6 ]= gA [buff_mem(6 ,x+1,y-1,pitch)];
f[4 ]= gA [buff_mem(4 ,x ,y+1,pitch)];
f[7 ]= gA [buff_mem(7 ,x+1,y+1,pitch)];
f[8 ]= gA [buff_mem(8 ,x-1,y+1,pitch)];
f[9 ]= temp[buff_mem(9 ,x ,y ,pitch)];
f[10]= temp[buff_mem(10,x-1,y ,pitch)];
f[11]= temp[buff_mem(11,x ,y-1,pitch)];
f[12]= temp[buff_mem(12,x+1,y ,pitch)];
f[13]= temp[buff_mem(13,x ,y+1,pitch)];
f[14]= fA [f_mem (14,x ,y ,0,pitch, zInner)];
f[15]= fA [f_mem (15,x-1,y ,0,pitch, zInner)];
f[16]= fA [f_mem (16,x ,y-1,0,pitch, zInner)];
f[17]= fA [f_mem (17,x+1,y ,0,pitch, zInner)];
f[18]= fA [f_mem (18,x ,y+1,0,pitch, zInner)];
if(im == 1 || im ==10){//BB
if(im == 10 && flag_F == 1){
check[0] = 1;
sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6];
sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17];
sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6];
sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18];
sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13];
sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
gB[buff_mem(0 ,x,y,pitch)] = f[0 ];
gB[buff_mem(1 ,x,y,pitch)] = f[3 ];
gB[buff_mem(2 ,x,y,pitch)] = f[4 ];
gB[buff_mem(3 ,x,y,pitch)] = f[1 ];
gB[buff_mem(4 ,x,y,pitch)] = f[2 ];
gB[buff_mem(5 ,x,y,pitch)] = f[7 ];
gB[buff_mem(6 ,x,y,pitch)] = f[8 ];
gB[buff_mem(7 ,x,y,pitch)] = f[5 ];
gB[buff_mem(8 ,x,y,pitch)] = f[6 ];
gB[buff_mem(9 ,x,y,pitch)] = f[14];
gB[buff_mem(10,x,y,pitch)] = f[17];
gB[buff_mem(11,x,y,pitch)] = f[18];
gB[buff_mem(12,x,y,pitch)] = f[15];
gB[buff_mem(13,x,y,pitch)] = f[16];
gB[buff_mem(14,x,y,pitch)] = f[9 ];
gB[buff_mem(15,x,y,pitch)] = f[12];
gB[buff_mem(16,x,y,pitch)] = f[13];
gB[buff_mem(17,x,y,pitch)] = f[10];
gB[buff_mem(18,x,y,pitch)] = f[11];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
if(im == 100)//north outlet
{
for(int i = 0; i<19; i++)
f[i ]= gA[buff_mem(i ,x,y-1,pitch)];
North_Extrap(f,1.0f);
}
if(im == 200)//south inlet
{
for(int i = 0; i<19; i++)
f[i ]= gA[buff_mem(i ,x,y+1,pitch)];
//South_Extrap(f,UMAX);
float u_in = PoisProf3D(x,GPU*(zInner+2));
South_Extrap(f,u_in);
}
if(im == 300)//east outlet
{
for(int i = 0; i<19; i++)
f[i ]= gA[buff_mem(i ,x-1,y,pitch)];
East_Extrap(f,1.0f);
}
if(im == 400)//west inlet
{
for(int i = 0; i<19; i++)
f[i ]= gA[buff_mem(i ,x+1,y,pitch)];
float u_in = PoisProf3D(y,GPU*(zInner+2));
West_Extrap(f,u_in);
}
if(im == 25)
xsymmetry_top(f,y,GPU*(zInner+2));
if(im == 26)
xsymmetry_bot(f,y,GPU*(zInner+2));
mrt_collide(f,omega);
for(int i = 0; i<19; i++)
gB[buff_mem(i ,x,y,pitch)] = f[i ];
}
if(REFINEMENT == 1){
if(x>=int(LRX0)&&x<=int(LRX0+XLRDIM*LRFACTOR)&&y>=int(LRY0)&&y<=int(LRY0+YLRDIM*LRFACTOR))
{
// if(x>int(LRX0+2)&&x<int(LRX0+XLRDIM*LRFACTOR-1)&&y>int(LRY0+2)&&y<int(LRY0+YLRDIM*LRFACTOR-1))
// {
// //do nothing
// }
// else{
//float rho,u,v,w,m9,m11,m13,m14,m15;
float mom[9];
PhysicalMoments(mom,f);
for(int i = 0; i<9; i++)
g_interp[buff_mem_interp(i,x-int(LRX0),y-int(LRY0),pitch_interp,zInner)]=mom[i];
// }
}
}
syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[t-STARTF],sumX[0]);
atomicAdd(&FY[t-STARTF],sumY[0]);
atomicAdd(&FZ[t-STARTF],sumZ[0]);
}
}
}
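//update_inn advances the interior planes of the coarse subdomain; the top and
//bottom interior planes stream from the h and g halo buffers respectively.
//Velocity averages/fluctuations are accumulated once t passes START_VELAV.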
__global__ void update_inn(float* fB, float* fA, float* g, float* h, float omega, size_t pitch, int GPU, int zInner, float* velAv_u, float* velAv_v, float* velFluc_u, float* velFluc_v, float* FX, float* FY, float* FZ, int t, int flag_F, float* f_interp, size_t pitch_interp)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,GPU*(zInner+2)+1+z);
float f[19];
__shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
__shared__ int check[1];
check[0] = 0;
syncthreads();
f[ 0] = fA[j];
f[ 1] = fA[f_mem (1 ,x-1,y ,z ,pitch, zInner)];
f[ 3] = fA[f_mem (3 ,x+1,y ,z ,pitch, zInner)];
f[ 2] = fA[f_mem (2 ,x ,y-1,z ,pitch, zInner)];
f[ 5] = fA[f_mem (5 ,x-1,y-1,z ,pitch, zInner)];
f[ 6] = fA[f_mem (6 ,x+1,y-1,z ,pitch, zInner)];
f[ 4] = fA[f_mem (4 ,x ,y+1,z ,pitch, zInner)];
f[ 7] = fA[f_mem (7 ,x+1,y+1,z ,pitch, zInner)];
f[ 8] = fA[f_mem (8 ,x-1,y+1,z ,pitch, zInner)];
if(z==zInner-1){//top nodes need info from h
f[ 9] = fA[f_mem (9 ,x ,y ,z-1,pitch, zInner)];
f[10]= fA[f_mem (10,x-1,y ,z-1,pitch, zInner)];
f[11]= fA[f_mem (11,x ,y-1,z-1,pitch, zInner)];
f[12]= fA[f_mem (12,x+1,y ,z-1,pitch, zInner)];
f[13]= fA[f_mem (13,x ,y+1,z-1,pitch, zInner)];
f[14]= h [buff_mem(14,x ,y ,pitch)];
f[15]= h [buff_mem(15,x-1,y ,pitch)];
f[16]= h [buff_mem(16,x ,y-1,pitch)];
f[17]= h [buff_mem(17,x+1,y ,pitch)];
f[18]= h [buff_mem(18,x ,y+1,pitch)];
}
else if(z==0){//bottom nodes need info from g
f[ 9] =g [buff_mem(9 ,x ,y ,pitch)];
f[10]= g [buff_mem(10,x-1,y ,pitch)];
f[11]= g [buff_mem(11,x ,y-1,pitch)];
f[12]= g [buff_mem(12,x+1,y ,pitch)];
f[13]= g [buff_mem(13,x ,y+1,pitch)];
f[14]= fA[f_mem (14,x ,y ,z+1,pitch, zInner)];
f[15]= fA[f_mem (15,x-1,y ,z+1,pitch, zInner)];
f[16]= fA[f_mem (16,x ,y-1,z+1,pitch, zInner)];
f[17]= fA[f_mem (17,x+1,y ,z+1,pitch, zInner)];
f[18]= fA[f_mem (18,x ,y+1,z+1,pitch, zInner)];
}
else{//normal nodes
f[ 9] = fA[f_mem(9 ,x ,y ,z-1,pitch,zInner)];
f[10]= fA[f_mem(10,x-1,y ,z-1,pitch,zInner)];
f[11]= fA[f_mem(11,x ,y-1,z-1,pitch,zInner)];
f[12]= fA[f_mem(12,x+1,y ,z-1,pitch,zInner)];
f[13]= fA[f_mem(13,x ,y+1,z-1,pitch,zInner)];
f[14]= fA[f_mem(14,x ,y ,z+1,pitch,zInner)];
f[15]= fA[f_mem(15,x-1,y ,z+1,pitch,zInner)];
f[16]= fA[f_mem(16,x ,y-1,z+1,pitch,zInner)];
f[17]= fA[f_mem(17,x+1,y ,z+1,pitch,zInner)];
f[18]= fA[f_mem(18,x ,y+1,z+1,pitch,zInner)];
}//end normal nodes
if(im == 1 || im ==10){//BB
if(im == 10 && flag_F == 1){
check[0] = 1;
sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6];
sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17];
sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6];
sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18];
sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13];
sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
fB[f_mem(1 ,x,y,z,pitch,zInner)] = f[ 3] ;
fB[f_mem(2 ,x,y,z,pitch,zInner)] = f[ 4] ;
fB[f_mem(3 ,x,y,z,pitch,zInner)] = f[ 1] ;
fB[f_mem(4 ,x,y,z,pitch,zInner)] = f[ 2] ;
fB[f_mem(5 ,x,y,z,pitch,zInner)] = f[ 7] ;
fB[f_mem(6 ,x,y,z,pitch,zInner)] = f[ 8] ;
fB[f_mem(7 ,x,y,z,pitch,zInner)] = f[ 5] ;
fB[f_mem(8 ,x,y,z,pitch,zInner)] = f[ 6] ;
fB[f_mem(9 ,x,y,z,pitch,zInner)] = f[14];
fB[f_mem(10,x,y,z,pitch,zInner)] = f[17];
fB[f_mem(11,x,y,z,pitch,zInner)] = f[18];
fB[f_mem(12,x,y,z,pitch,zInner)] = f[15];
fB[f_mem(13,x,y,z,pitch,zInner)] = f[16];
fB[f_mem(14,x,y,z,pitch,zInner)] = f[ 9] ;
fB[f_mem(15,x,y,z,pitch,zInner)] = f[12];
fB[f_mem(16,x,y,z,pitch,zInner)] = f[13];
fB[f_mem(17,x,y,z,pitch,zInner)] = f[10];
fB[f_mem(18,x,y,z,pitch,zInner)] = f[11];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
if(im == 100)//north outlet
{
for(int i = 0; i<19; i++)
f[i ]= fA[f_mem(i ,x,y-1,z,pitch,zInner)];
North_Extrap(f,1.0f);
}
if(im == 200)//south inlet
{
for(int i = 0; i<19; i++)
f[i ]= fA[f_mem(i ,x,y+1,z,pitch,zInner)];
//South_Extrap(f,UMAX);
float u_in = PoisProf3D(x,GPU*(zInner+2)+1+z);
South_Extrap(f,u_in);
}
if(im == 300)//east outlet
{
for(int i = 0; i<19; i++)
f[i ]= fA[f_mem(i ,x-1,y,z,pitch,zInner)];
East_Extrap(f,1.0f);
}
if(im == 400)//west inlet
{
for(int i = 0; i<19; i++)
f[i ]= fA[f_mem(i ,x+1,y,z,pitch,zInner)];
float u_in = PoisProf3D(y,GPU*(zInner+2)+1+z);
West_Extrap(f,u_in);
}
if(im == 25)
xsymmetry_top(f,y,GPU*(zInner+2)+1+z);
if(im == 26)
xsymmetry_bot(f,y,GPU*(zInner+2)+1+z);
mrt_collide(f,omega);
if(VELAV == 1){
if(t>=START_VELAV && t<START_VELFLUC){
float u_Av = velAv_u[x+y*pitch+(z+1)*pitch*YDIM];
float v_Av = velAv_v[x+y*pitch+(z+1)*pitch*YDIM];
vel_av(f,u_Av,v_Av,t);
velAv_u[x+y*pitch+(z+1)*pitch*YDIM] = u_Av;
velAv_v[x+y*pitch+(z+1)*pitch*YDIM] = v_Av;
}
else if(t>=START_VELFLUC){
float u_Av = velAv_u[x+y*pitch+(z+1)*pitch*YDIM];
float v_Av = velAv_v[x+y*pitch+(z+1)*pitch*YDIM];
float u_fluc = velFluc_u[x+y*pitch+(z+1)*pitch*YDIM];
float v_fluc = velFluc_v[x+y*pitch+(z+1)*pitch*YDIM];
vel_fluc(f,u_Av,v_Av,u_fluc,v_fluc,t);
velFluc_u[x+y*pitch+(z+1)*pitch*YDIM] = u_fluc;
velFluc_v[x+y*pitch+(z+1)*pitch*YDIM] = v_fluc;
}
}
for(int i = 0; i<19; i++)
fB[f_mem(i ,x,y,z,pitch,zInner)] = f[ i] ;
}
if(REFINEMENT == 1){
if(x>=int(LRX0)&&x<=int(LRX0+XLRDIM*LRFACTOR)&&y>=int(LRY0)&&y<=int(LRY0+YLRDIM*LRFACTOR))
{
// if(x>int(LRX0+2)&&x<int(LRX0+XLRDIM*LRFACTOR-1)&&y>int(LRY0+2)&&y<int(LRY0+YLRDIM*LRFACTOR-1))
// {
// //do nothing
// }
// else{
//float rho,u,v,w,m9,m11,m13,m14,m15;
float mom[9];
PhysicalMoments(mom,f);
for(int i = 0; i<9; i++)
f_interp[f_mem_interp(i,x-int(LRX0),y-int(LRY0),z,pitch_interp,zInner)]=mom[i];
// }
}
}
syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[t-STARTF],sumX[0]);
atomicAdd(&FY[t-STARTF],sumY[0]);
atomicAdd(&FZ[t-STARTF],sumZ[0]);
}
}
}
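//LR counterpart of update_top; no inlet/outlet handling appears here, since the
//refined region is assumed to lie in the domain interior.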
__global__ void update_top_LR(float* hB, float* hA, float* fA, float* temp,
float omega, size_t pitch, int GPU, int zInner, float* FX, float* FY, float* FZ, int t, int flag_F)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = (GPU+1)*(zInner+2)-1;//physical coord in LR region
int j = x+y*pitch;//index on padded mem (pitch in elements)
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+LRFACTOR*z;
int im = ImageFcnLR(xcoord,ycoord,zcoord);
float f[19];
__shared__ float sumX[BLOCKSIZELRX], sumY[BLOCKSIZELRX], sumZ[BLOCKSIZELRX];
__shared__ int check[1];
check[0] = 0;
syncthreads();
f[0 ]= hA [j];
f[1 ]= hA [buff_memLR(1 ,x-1,y ,pitch)];
f[3 ]= hA [buff_memLR(3 ,x+1,y ,pitch)];
f[2 ]= hA [buff_memLR(2 ,x ,y-1,pitch)];
f[5 ]= hA [buff_memLR(5 ,x-1,y-1,pitch)];
f[6 ]= hA [buff_memLR(6 ,x+1,y-1,pitch)];
f[4 ]= hA [buff_memLR(4 ,x ,y+1,pitch)];
f[7 ]= hA [buff_memLR(7 ,x+1,y+1,pitch)];
f[8 ]= hA [buff_memLR(8 ,x-1,y+1,pitch)];
f[9 ]= fA [ f_memLR(9 ,x ,y ,zInner-1,pitch, zInner)];
f[10]= fA [ f_memLR(10,x-1,y ,zInner-1,pitch, zInner)];
f[11]= fA [ f_memLR(11,x ,y-1,zInner-1,pitch, zInner)];
f[12]= fA [ f_memLR(12,x+1,y ,zInner-1,pitch, zInner)];
f[13]= fA [ f_memLR(13,x ,y+1,zInner-1,pitch, zInner)];
f[14]= temp[buff_memLR(14,x ,y ,pitch)];
f[15]= temp[buff_memLR(15,x-1,y ,pitch)];
f[16]= temp[buff_memLR(16,x ,y-1,pitch)];
f[17]= temp[buff_memLR(17,x+1,y ,pitch)];
f[18]= temp[buff_memLR(18,x ,y+1,pitch)];
if(im == 1 || im ==10){//BB
if(im == 10 && flag_F == 1){
check[0] = 1;
sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6];
sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17];
sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6];
sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18];
sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13];
sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
hB[buff_memLR(0 ,x,y,pitch)] = f[0 ];
hB[buff_memLR(1 ,x,y,pitch)] = f[3 ];
hB[buff_memLR(2 ,x,y,pitch)] = f[4 ];
hB[buff_memLR(3 ,x,y,pitch)] = f[1 ];
hB[buff_memLR(4 ,x,y,pitch)] = f[2 ];
hB[buff_memLR(5 ,x,y,pitch)] = f[7 ];
hB[buff_memLR(6 ,x,y,pitch)] = f[8 ];
hB[buff_memLR(7 ,x,y,pitch)] = f[5 ];
hB[buff_memLR(8 ,x,y,pitch)] = f[6 ];
hB[buff_memLR(9 ,x,y,pitch)] = f[14];
hB[buff_memLR(10,x,y,pitch)] = f[17];
hB[buff_memLR(11,x,y,pitch)] = f[18];
hB[buff_memLR(12,x,y,pitch)] = f[15];
hB[buff_memLR(13,x,y,pitch)] = f[16];
hB[buff_memLR(14,x,y,pitch)] = f[9 ];
hB[buff_memLR(15,x,y,pitch)] = f[12];
hB[buff_memLR(16,x,y,pitch)] = f[13];
hB[buff_memLR(17,x,y,pitch)] = f[10];
hB[buff_memLR(18,x,y,pitch)] = f[11];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
mrt_collide(f,omega);
for(int i = 0; i<19; i++)
hB[buff_memLR(i ,x,y,pitch)] = f[i ];
}
syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[t-STARTF],sumX[0]);
atomicAdd(&FY[t-STARTF],sumY[0]);
atomicAdd(&FZ[t-STARTF],sumZ[0]);
}
}
}
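//LR counterpart of update_bot.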
__global__ void update_bot_LR(float* gB, float* gA, float* fA, float* temp,
float omega, size_t pitch, int GPU, int zInner, float* FX, float* FY, float* FZ, int t, int flag_F)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = (zInner+2)-1;
int j = x+y*pitch;//index on padded mem (pitch in elements)
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+GPU*LRFACTOR*z;
int im = ImageFcnLR(xcoord,ycoord,zcoord);
float f[19];
__shared__ float sumX[BLOCKSIZELRX], sumY[BLOCKSIZELRX], sumZ[BLOCKSIZELRX];
__shared__ int check[1];
check[0] = 0;
syncthreads();
f[0 ]= gA [j];
f[1 ]= gA [buff_memLR(1 ,x-1,y ,pitch)];
f[3 ]= gA [buff_memLR(3 ,x+1,y ,pitch)];
f[2 ]= gA [buff_memLR(2 ,x ,y-1,pitch)];
f[5 ]= gA [buff_memLR(5 ,x-1,y-1,pitch)];
f[6 ]= gA [buff_memLR(6 ,x+1,y-1,pitch)];
f[4 ]= gA [buff_memLR(4 ,x ,y+1,pitch)];
f[7 ]= gA [buff_memLR(7 ,x+1,y+1,pitch)];
f[8 ]= gA [buff_memLR(8 ,x-1,y+1,pitch)];
f[9 ]= temp[buff_memLR(9 ,x ,y ,pitch)];
f[10]= temp[buff_memLR(10,x-1,y ,pitch)];
f[11]= temp[buff_memLR(11,x ,y-1,pitch)];
f[12]= temp[buff_memLR(12,x+1,y ,pitch)];
f[13]= temp[buff_memLR(13,x ,y+1,pitch)];
f[14]= fA [ f_memLR(14,x ,y ,0,pitch, zInner)];
f[15]= fA [ f_memLR(15,x-1,y ,0,pitch, zInner)];
f[16]= fA [ f_memLR(16,x ,y-1,0,pitch, zInner)];
f[17]= fA [ f_memLR(17,x+1,y ,0,pitch, zInner)];
f[18]= fA [ f_memLR(18,x ,y+1,0,pitch, zInner)];
if(im == 1 || im ==10){//BB
if(im == 10 && flag_F == 1){
check[0] = 1;
sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6];
sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17];
sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6];
sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18];
sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13];
sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
gB[buff_memLR(0 ,x,y,pitch)] = f[0 ];
gB[buff_memLR(1 ,x,y,pitch)] = f[3 ];
gB[buff_memLR(2 ,x,y,pitch)] = f[4 ];
gB[buff_memLR(3 ,x,y,pitch)] = f[1 ];
gB[buff_memLR(4 ,x,y,pitch)] = f[2 ];
gB[buff_memLR(5 ,x,y,pitch)] = f[7 ];
gB[buff_memLR(6 ,x,y,pitch)] = f[8 ];
gB[buff_memLR(7 ,x,y,pitch)] = f[5 ];
gB[buff_memLR(8 ,x,y,pitch)] = f[6 ];
gB[buff_memLR(9 ,x,y,pitch)] = f[14];
gB[buff_memLR(10,x,y,pitch)] = f[17];
gB[buff_memLR(11,x,y,pitch)] = f[18];
gB[buff_memLR(12,x,y,pitch)] = f[15];
gB[buff_memLR(13,x,y,pitch)] = f[16];
gB[buff_memLR(14,x,y,pitch)] = f[9 ];
gB[buff_memLR(15,x,y,pitch)] = f[12];
gB[buff_memLR(16,x,y,pitch)] = f[13];
gB[buff_memLR(17,x,y,pitch)] = f[10];
gB[buff_memLR(18,x,y,pitch)] = f[11];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
mrt_collide(f,omega);
for(int i = 0; i<19; i++)
gB[buff_memLR(i ,x,y,pitch)] = f[i ];
}
syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[t-STARTF],sumX[0]);
atomicAdd(&FY[t-STARTF],sumY[0]);
atomicAdd(&FZ[t-STARTF],sumZ[0]);
}
}
}
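//LR counterpart of update_inn; velocity statistics use the LR variants vel_avLR/vel_flucLR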
__global__ void update_inn_LR(float* fB, float* fA, float* g, float* h, float omega, size_t pitch, int GPU, int zInner, float* velAv_u, float* velAv_v, float* velFluc_u, float* velFluc_v, float* FX, float* FY, float* FZ, int t, int flag_F)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
int im = ImageFcnLR(LRX0+LRFACTOR*x,LRY0+LRFACTOR*y,LRZ0+LRFACTOR*(GPU*(zInner+2)+1+z));
float f[19];
__shared__ float sumX[BLOCKSIZELRX], sumY[BLOCKSIZELRX], sumZ[BLOCKSIZELRX];
__shared__ int check[1];
check[0] = 0;
syncthreads();
f[ 0] = fA[j];
f[ 1] = fA[f_memLR (1 ,x-1,y ,z ,pitch, zInner)];
f[ 3] = fA[f_memLR (3 ,x+1,y ,z ,pitch, zInner)];
f[ 2] = fA[f_memLR (2 ,x ,y-1,z ,pitch, zInner)];
f[ 5] = fA[f_memLR (5 ,x-1,y-1,z ,pitch, zInner)];
f[ 6] = fA[f_memLR (6 ,x+1,y-1,z ,pitch, zInner)];
f[ 4] = fA[f_memLR (4 ,x ,y+1,z ,pitch, zInner)];
f[ 7] = fA[f_memLR (7 ,x+1,y+1,z ,pitch, zInner)];
f[ 8] = fA[f_memLR (8 ,x-1,y+1,z ,pitch, zInner)];
if(z==zInner-1){//top nodes need info from h
f[ 9] =fA[ f_memLR(9 ,x ,y ,z-1,pitch, zInner)];
f[10]= fA[ f_memLR(10,x-1,y ,z-1,pitch, zInner)];
f[11]= fA[ f_memLR(11,x ,y-1,z-1,pitch, zInner)];
f[12]= fA[ f_memLR(12,x+1,y ,z-1,pitch, zInner)];
f[13]= fA[ f_memLR(13,x ,y+1,z-1,pitch, zInner)];
f[14]= h [buff_memLR(14,x ,y ,pitch)];
f[15]= h [buff_memLR(15,x-1,y ,pitch)];
f[16]= h [buff_memLR(16,x ,y-1,pitch)];
f[17]= h [buff_memLR(17,x+1,y ,pitch)];
f[18]= h [buff_memLR(18,x ,y+1,pitch)];
}
else if(z==0){//bottom nodes need info from g
f[ 9] =g [buff_memLR(9 ,x ,y ,pitch)];
f[10]= g [buff_memLR(10,x-1,y ,pitch)];
f[11]= g [buff_memLR(11,x ,y-1,pitch)];
f[12]= g [buff_memLR(12,x+1,y ,pitch)];
f[13]= g [buff_memLR(13,x ,y+1,pitch)];
f[14]= fA[ f_memLR(14,x ,y ,z+1,pitch, zInner)];
f[15]= fA[ f_memLR(15,x-1,y ,z+1,pitch, zInner)];
f[16]= fA[ f_memLR(16,x ,y-1,z+1,pitch, zInner)];
f[17]= fA[ f_memLR(17,x+1,y ,z+1,pitch, zInner)];
f[18]= fA[ f_memLR(18,x ,y+1,z+1,pitch, zInner)];
}
else{//normal nodes
f[ 9] =fA[f_memLR(9 ,x ,y ,z-1,pitch,zInner)];
f[10]= fA[f_memLR(10,x-1,y ,z-1,pitch,zInner)];
f[11]= fA[f_memLR(11,x ,y-1,z-1,pitch,zInner)];
f[12]= fA[f_memLR(12,x+1,y ,z-1,pitch,zInner)];
f[13]= fA[f_memLR(13,x ,y+1,z-1,pitch,zInner)];
f[14]= fA[f_memLR(14,x ,y ,z+1,pitch,zInner)];
f[15]= fA[f_memLR(15,x-1,y ,z+1,pitch,zInner)];
f[16]= fA[f_memLR(16,x ,y-1,z+1,pitch,zInner)];
f[17]= fA[f_memLR(17,x+1,y ,z+1,pitch,zInner)];
f[18]= fA[f_memLR(18,x ,y+1,z+1,pitch,zInner)];
}//end normal nodes
if(im == 1 || im ==10){//BB
if(im == 10 && flag_F == 1){
check[0] = 1;
sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6];
sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17];
sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6];
sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18];
sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13];
sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
fB[f_memLR(1 ,x,y,z,pitch,zInner)] = f[ 3] ;
fB[f_memLR(2 ,x,y,z,pitch,zInner)] = f[ 4] ;
fB[f_memLR(3 ,x,y,z,pitch,zInner)] = f[ 1] ;
fB[f_memLR(4 ,x,y,z,pitch,zInner)] = f[ 2] ;
fB[f_memLR(5 ,x,y,z,pitch,zInner)] = f[ 7] ;
fB[f_memLR(6 ,x,y,z,pitch,zInner)] = f[ 8] ;
fB[f_memLR(7 ,x,y,z,pitch,zInner)] = f[ 5] ;
fB[f_memLR(8 ,x,y,z,pitch,zInner)] = f[ 6] ;
fB[f_memLR(9 ,x,y,z,pitch,zInner)] = f[14];
fB[f_memLR(10,x,y,z,pitch,zInner)] = f[17];
fB[f_memLR(11,x,y,z,pitch,zInner)] = f[18];
fB[f_memLR(12,x,y,z,pitch,zInner)] = f[15];
fB[f_memLR(13,x,y,z,pitch,zInner)] = f[16];
fB[f_memLR(14,x,y,z,pitch,zInner)] = f[ 9] ;
fB[f_memLR(15,x,y,z,pitch,zInner)] = f[12];
fB[f_memLR(16,x,y,z,pitch,zInner)] = f[13];
fB[f_memLR(17,x,y,z,pitch,zInner)] = f[10];
fB[f_memLR(18,x,y,z,pitch,zInner)] = f[11];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
mrt_collide(f,omega);
if(VELAV == 1){
if(t>=START_VELAV && t<START_VELFLUC){
float u_Av = velAv_u[x+y*pitch+(z+1)*pitch*YLRDIM];
float v_Av = velAv_v[x+y*pitch+(z+1)*pitch*YLRDIM];
vel_avLR(f,u_Av,v_Av,t);
velAv_u[x+y*pitch+(z+1)*pitch*YLRDIM] = u_Av;
velAv_v[x+y*pitch+(z+1)*pitch*YLRDIM] = v_Av;
}
else if(t>=START_VELFLUC){
float u_Av = velAv_u[x+y*pitch+(z+1)*pitch*YLRDIM];
float v_Av = velAv_v[x+y*pitch+(z+1)*pitch*YLRDIM];
float u_fluc = velFluc_u[x+y*pitch+(z+1)*pitch*YLRDIM];
float v_fluc = velFluc_v[x+y*pitch+(z+1)*pitch*YLRDIM];
vel_flucLR(f,u_Av,v_Av,u_fluc,v_fluc,t);
velFluc_u[x+y*pitch+(z+1)*pitch*YLRDIM] = u_fluc;
velFluc_v[x+y*pitch+(z+1)*pitch*YLRDIM] = v_fluc;
}
}
for(int i = 0; i<19; i++)
fB[f_memLR(i ,x,y,z,pitch,zInner)] = f[ i] ;
}
syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[t-STARTF],sumX[0]);
atomicAdd(&FY[t-STARTF],sumY[0]);
atomicAdd(&FZ[t-STARTF],sumZ[0]);
}
}
}
/*
InterpCF runs on the LR (fine) grid. A subset of its threads first reads the coarse-mesh nodes that completely envelop the fine-mesh block and loads their physical moments into shared memory. All threads then interpolate the shared-memory data to the fine-grid positions and rescale the result into fine-grid distributions.
*/
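//SF rescales the moments between grid levels: in main, SF_cf = omega*(1-omegaLR)/((1-omega)*omegaLR/LRFACTOR)
//and SF_fc = 1/SF_cf; the scaling is presumably applied to the non-conserved
//(non-equilibrium) moments inside InvertPhysicalMoments.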
__global__ void InterpCF(float* f_f, float* g_f, float* h_f, size_t pitch_f, float* m_f_c, float* m_g_c, float* m_h_c, float* m_g_temp, size_t pitch_m, float SF, float omega_c, int zInner, int zInner_f)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
__shared__ float mom_c[BLOCKSIZEINTERP][2][2][9];
__shared__ float S_c[BLOCKSIZEINTERP][2][2][6];
int GPU = 0;
int im = ImageFcnLR(LRX0+LRFACTOR*x,LRY0+LRFACTOR*y,LRZ0+LRFACTOR*(GPU*(zInner+2)+z));
if(blockIdx.z == 0 && threadIdx.x<ceil(BLOCKSIZEINTERP*LRFACTOR)+1 && threadIdx.z<2 && threadIdx.y<2)
{
//use g and g_temp
int x_c = threadIdx.x+blockIdx.x*BLOCKSIZEINTERP*LRFACTOR;//coarse-grid x index; each block spans BLOCKSIZEINTERP*LRFACTOR coarse cells in x
int y_c = threadIdx.y+blockIdx.y;//in coarse grid, blockdim.y is 1
int ymax = YLRDIM*LRFACTOR+1;
if(threadIdx.z == 0){
for(int i = 0; i<9; i++)
mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_g_temp[x_c+y_c*pitch_m+i*ymax*pitch_m];
}
else{
for(int i = 0; i<9; i++)
mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_g_c[x_c+y_c*pitch_m+i*ymax*pitch_m];
}
StrainRate(S_c[threadIdx.x][threadIdx.y][threadIdx.z],mom_c[threadIdx.x][threadIdx.y][threadIdx.z],omega_c);
}
else if(blockIdx.z == 1 && threadIdx.x<ceil(BLOCKSIZEINTERP*LRFACTOR)+1 && threadIdx.z<2 && threadIdx.y<2)
{
//use g and f
int x_c = threadIdx.x+blockIdx.x*BLOCKSIZEINTERP*LRFACTOR;//coarse-grid x index; each block spans BLOCKSIZEINTERP*LRFACTOR coarse cells in x
int y_c = threadIdx.y+blockIdx.y;//in coarse grid, blockdim.y is 1
int ymax = YLRDIM*LRFACTOR+1;
if(threadIdx.z == 0){
for(int i = 0; i<9; i++)
mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_g_c[x_c+y_c*pitch_m+i*ymax*pitch_m];
}
else{
for(int i = 0; i<9; i++)
mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_f_c[x_c+y_c*pitch_m+i*ymax*pitch_m*zInner];
}
StrainRate(S_c[threadIdx.x][threadIdx.y][threadIdx.z],mom_c[threadIdx.x][threadIdx.y][threadIdx.z],omega_c);
}
else if(blockIdx.z == zInner+1 && threadIdx.x<ceil(BLOCKSIZEINTERP*LRFACTOR)+1 && threadIdx.z<2 && threadIdx.y<2)
{
//use h and f
int x_c = threadIdx.x+blockIdx.x*BLOCKSIZEINTERP*LRFACTOR;//coarse-grid x index; each block spans BLOCKSIZEINTERP*LRFACTOR coarse cells in x
int y_c = threadIdx.y+blockIdx.y;//in coarse grid, blockdim.y is 1
int ymax = YLRDIM*LRFACTOR+1;
if(threadIdx.z == 0){
for(int i = 0; i<9; i++)
mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_f_c[x_c+y_c*pitch_m+(zInner-1)*ymax*pitch_m+i*ymax*pitch_m*zInner];
}
else{
for(int i = 0; i<9; i++)
mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_h_c[x_c+y_c*pitch_m+i*ymax*pitch_m];
}
StrainRate(S_c[threadIdx.x][threadIdx.y][threadIdx.z],mom_c[threadIdx.x][threadIdx.y][threadIdx.z],omega_c);
}
else if(threadIdx.x<ceil(BLOCKSIZEINTERP*LRFACTOR)+1 && threadIdx.z<2 && threadIdx.y<2){//use f only
int x_c = threadIdx.x+blockIdx.x*BLOCKSIZEINTERP*LRFACTOR;//coarse-grid x index; each block spans BLOCKSIZEINTERP*LRFACTOR coarse cells in x
int y_c = threadIdx.y+blockIdx.y;//in coarse grid, blockdim.y is 1
int z_c = threadIdx.z+blockIdx.z-2;//in coarse grid, blockdim.z is 1; -2 to account for g and lower halo
int ymax = YLRDIM*LRFACTOR+1;
for(int i = 0; i<9; i++)
mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_f_c[x_c+y_c*pitch_m+z_c*ymax*pitch_m+i*ymax*pitch_m*zInner];
StrainRate(S_c[threadIdx.x][threadIdx.y][threadIdx.z],mom_c[threadIdx.x][threadIdx.y][threadIdx.z],omega_c);
}
syncthreads();
if(x<LRLEVEL || x>XLRDIM-LRLEVEL-1 || y<LRLEVEL || y>YLRDIM-LRLEVEL-1){
//if(x<LRLEVEL || x>XLRDIM-LRLEVEL-2 || y<LRLEVEL || y>YLRDIM-LRLEVEL-2){
//interpolate from shared mem
int xm = int(threadIdx.x*LRFACTOR+LRFACTOR*0.5f);
int ym = int(threadIdx.y*LRFACTOR+LRFACTOR*0.5f);
int zm = int(threadIdx.z*LRFACTOR+LRFACTOR*0.5f);
int xp = xm+1; //int yp = ym+1; int zp = zm+1;
float xf = (threadIdx.x*LRFACTOR+LRFACTOR*0.5f)-xm;
float yf = (threadIdx.y*LRFACTOR+LRFACTOR*0.5f)-ym;
float zf = (threadIdx.z*LRFACTOR+LRFACTOR*0.5f)-zm;
float mom[9];
for(int i = 0; i<9; i++){
float v000 = mom_c[xm][0][0][i];
float v001 = mom_c[xp][0][0][i];
float v010 = mom_c[xm][1][0][i];
float v011 = mom_c[xp][1][0][i];
float v100 = mom_c[xm][0][1][i];
float v101 = mom_c[xp][0][1][i];
float v110 = mom_c[xm][1][1][i];
float v111 = mom_c[xp][1][1][i];
mom[i] = trilinear_interp(v000, v001, v010, v011, v100, v101, v110, v111, xf, yf, zf);
}
if(ORDER == 2)
{
float u_x1,u_x2,u_x3,u_x4,u_x5,u_x6,u_x7,u_x8;
float v_y1,v_y2,v_y3,v_y4,v_y5,v_y6,v_y7,v_y8;
float w_z1,w_z2,w_z3,w_z4,w_z5,w_z6,w_z7,w_z8;
float Sxy1,Sxy2,Sxy3,Sxy4,Sxy5,Sxy6,Sxy7,Sxy8;
float Syz1,Syz2,Syz3,Syz4,Syz5,Syz6,Syz7,Syz8;
float Sxz1,Sxz2,Sxz3,Sxz4,Sxz5,Sxz6,Sxz7,Sxz8;
u_x1=S_c[xm][0][0][0];v_y1=S_c[xm][0][0][1];w_z1=S_c[xm][0][0][2];Sxy1=S_c[xm][0][0][3];Syz1=S_c[xm][0][0][4];Sxz1=S_c[xm][0][0][5];
u_x2=S_c[xp][0][0][0];v_y2=S_c[xp][0][0][1];w_z2=S_c[xp][0][0][2];Sxy2=S_c[xp][0][0][3];Syz2=S_c[xp][0][0][4];Sxz2=S_c[xp][0][0][5];
u_x3=S_c[xm][1][0][0];v_y3=S_c[xm][1][0][1];w_z3=S_c[xm][1][0][2];Sxy3=S_c[xm][1][0][3];Syz3=S_c[xm][1][0][4];Sxz3=S_c[xm][1][0][5];
u_x4=S_c[xp][1][0][0];v_y4=S_c[xp][1][0][1];w_z4=S_c[xp][1][0][2];Sxy4=S_c[xp][1][0][3];Syz4=S_c[xp][1][0][4];Sxz4=S_c[xp][1][0][5];
u_x5=S_c[xm][0][1][0];v_y5=S_c[xm][0][1][1];w_z5=S_c[xm][0][1][2];Sxy5=S_c[xm][0][1][3];Syz5=S_c[xm][0][1][4];Sxz5=S_c[xm][0][1][5];
u_x6=S_c[xp][0][1][0];v_y6=S_c[xp][0][1][1];w_z6=S_c[xp][0][1][2];Sxy6=S_c[xp][0][1][3];Syz6=S_c[xp][0][1][4];Sxz6=S_c[xp][0][1][5];
u_x7=S_c[xm][1][1][0];v_y7=S_c[xm][1][1][1];w_z7=S_c[xm][1][1][2];Sxy7=S_c[xm][1][1][3];Syz7=S_c[xm][1][1][4];Sxz7=S_c[xm][1][1][5];
u_x8=S_c[xp][1][1][0];v_y8=S_c[xp][1][1][1];w_z8=S_c[xp][1][1][2];Sxy8=S_c[xp][1][1][3];Syz8=S_c[xp][1][1][4];Sxz8=S_c[xp][1][1][5];
float m03,m05,m07, m13,m15,m17, m23,m25,m27, m33,m35,m37, m43,m45,m47, m53,m55,m57, m63,m65,m67, m73,m75,m77;
m03=mom_c[xm][0][0][1];m05=mom_c[xm][0][0][2];m07=mom_c[xm][0][0][3];
m13=mom_c[xp][0][0][1];m15=mom_c[xp][0][0][2];m17=mom_c[xp][0][0][3];
m23=mom_c[xm][1][0][1];m25=mom_c[xm][1][0][2];m27=mom_c[xm][1][0][3];
m33=mom_c[xp][1][0][1];m35=mom_c[xp][1][0][2];m37=mom_c[xp][1][0][3];
m43=mom_c[xm][0][1][1];m45=mom_c[xm][0][1][2];m47=mom_c[xm][0][1][3];
m53=mom_c[xp][0][1][1];m55=mom_c[xp][0][1][2];m57=mom_c[xp][0][1][3];
m63=mom_c[xm][1][1][1];m65=mom_c[xm][1][1][2];m67=mom_c[xm][1][1][3];
m73=mom_c[xp][1][1][1];m75=mom_c[xp][1][1][2];m77=mom_c[xp][1][1][3];
float cx = -((u_x8-u_x7+u_x6-u_x5+u_x4-u_x3+u_x2-u_x1))*0.03125f;
float cy = -((Sxy8+Sxy7-Sxy6-Sxy5+Sxy4+Sxy3-Sxy2-Sxy1)-m75+m65+m55-m45-m35+m25+m15-m05)*0.0625f;
float cz = -((Sxz8+Sxz7+Sxz6+Sxz5-Sxz4-Sxz3-Sxz2-Sxz1)-m77+m67-m57+m47+m37-m27+m17-m07)*0.0625f;
float dx = -((Sxy8-Sxy7+Sxy6-Sxy5+Sxy4-Sxy3+Sxy2-Sxy1)-m73+m63+m53-m43-m33+m23+m13-m03)*0.0625f;
float dy = -((v_y8+v_y7-v_y6-v_y5+v_y4+v_y3-v_y2-v_y1))*0.03125f;
float dz = -((Syz8+Syz7+Syz6+Syz5-Syz4-Syz3-Syz2-Syz1)-m77-m67+m57+m47+m37+m27-m17-m07)*0.0625f;
float ex = -((Sxz8-Sxz7+Sxz6-Sxz5+Sxz4-Sxz3+Sxz2-Sxz1)-m73+m63-m53+m43+m33-m23+m13-m03)*0.0625f;
float ey = -((Syz8+Syz7-Syz6-Syz5+Syz4+Syz3-Syz2-Syz1)-m75-m65+m55+m45+m35+m25-m15-m05)*0.0625f;
float ez = -((w_z8+w_z7+w_z6+w_z5-w_z4-w_z3-w_z2-w_z1))*0.03125f;
float xpr = 4.f*xf*xf-4.f*xf+1.f;
float ypr = 4.f*yf*yf-4.f*yf+1.f;
float zpr = 4.f*zf*zf-4.f*zf+1.f;
mom[1] += cx*(1.f-xpr)+cy*(1.f-ypr)+cz*(1.f-zpr);
mom[2] += dx*(1.f-xpr)+dy*(1.f-ypr)+dz*(1.f-zpr);
mom[3] += ex*(1.f-xpr)+ey*(1.f-ypr)+ez*(1.f-zpr);
}
float f[19];
InvertPhysicalMoments(f,mom,SF);
if(im != 1 && im != 10){
if(z==0){
for(int i = 0; i<19; i++){
g_f[buff_memLR(i,x,y,pitch_f)]=f[i];
}
}
else if(z==gridDim.z*blockDim.z-1){
for(int i = 0; i<19; i++){
h_f[buff_memLR(i,x,y,pitch_f)]=f[i];
}
}
else{
for(int i = 0; i<19; i++){
f_f[f_memLR(i,x,y,z-1,pitch_f,zInner_f)]=f[i];
}
}
}
}
}
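//InterpFC restricts the fine-mesh solution back onto the coarse mesh in the
//interior of the LR region: each coarse node averages (ORDER == 1) or
//quadratically reconstructs (ORDER == 2) the physical moments of the 8
//surrounding fine nodes, rescales them with SF, and writes coarse distributions.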
__global__ void InterpFC(float* f_c, float* g_c, float* h_c, float* f_f, float* h_f, float* temp_f, size_t pitch_c, size_t pitch_f, float SF, float omega_f, int zInner, int zInner_f)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
//extract only from the interior of the LR region, one coarse cell in from the LR boundary
if( x > LRX0+1 && x < LRX0+XLRDIM*LRFACTOR-2 && y > LRY0+1 && y < LRY0+YLRDIM*LRFACTOR-2 )
{
float f[19];
float mom[8][9];//physical moments of 8 neighboring nodes
float S_f[8][6];//strain rate tensor of 8 neighboring nodes
int xm = LRLEVEL*(x-LRX0);
int ym = LRLEVEL*(y-LRY0);
int zm = LRLEVEL*(z-(-(1.f-0.5f*LRFACTOR)))-1;//LRZ0=-(1.f-0.5f*LRFACTOR), and -1 to account for g_LR
int xp = xm+1;
int yp = ym+1;
int zp = zm+1;
//top nodes. interp between h and h_temp. output to h
if(z == zInner+1)
{
for(int i = 0; i<19; i++)
f[i] = temp_f[buff_memLR(i,xm,ym,pitch_f)];
PhysicalMoments(mom[0],f);
StrainRate(S_f[0],mom[0],omega_f);
for(int i = 0; i<19; i++)
f[i] = temp_f[buff_memLR(i,xp,ym,pitch_f)];
PhysicalMoments(mom[1],f);
StrainRate(S_f[1],mom[1],omega_f);
for(int i = 0; i<19; i++)
f[i] = temp_f[buff_memLR(i,xm,yp,pitch_f)];
PhysicalMoments(mom[2],f);
StrainRate(S_f[2],mom[2],omega_f);
for(int i = 0; i<19; i++)
f[i] = temp_f[buff_memLR(i,xp,yp,pitch_f)];
PhysicalMoments(mom[3],f);
StrainRate(S_f[3],mom[3],omega_f);
for(int i = 0; i<19; i++)
f[i] = h_f[buff_memLR(i,xm,ym,pitch_f)];
PhysicalMoments(mom[4],f);
StrainRate(S_f[4],mom[4],omega_f);
for(int i = 0; i<19; i++)
f[i] = h_f[buff_memLR(i,xp,ym,pitch_f)];
PhysicalMoments(mom[5],f);
StrainRate(S_f[5],mom[5],omega_f);
for(int i = 0; i<19; i++)
f[i] = h_f[buff_memLR(i,xm,yp,pitch_f)];
PhysicalMoments(mom[6],f);
StrainRate(S_f[6],mom[6],omega_f);
for(int i = 0; i<19; i++)
f[i] = h_f[buff_memLR(i,xp,yp,pitch_f)];
PhysicalMoments(mom[7],f);
StrainRate(S_f[7],mom[7],omega_f);
}
//inner nodes. output to g or f
else{
for(int i = 0; i<19; i++)
f[i] = f_f[f_memLR(i,xm,ym,zm,pitch_f,zInner_f)];
PhysicalMoments(mom[0],f);
StrainRate(S_f[0],mom[0],omega_f);
for(int i = 0; i<19; i++)
f[i] = f_f[f_memLR(i,xp,ym,zm,pitch_f,zInner_f)];
PhysicalMoments(mom[1],f);
StrainRate(S_f[1],mom[1],omega_f);
for(int i = 0; i<19; i++)
f[i] = f_f[f_memLR(i,xm,yp,zm,pitch_f,zInner_f)];
PhysicalMoments(mom[2],f);
StrainRate(S_f[2],mom[2],omega_f);
for(int i = 0; i<19; i++)
f[i] = f_f[f_memLR(i,xp,yp,zm,pitch_f,zInner_f)];
PhysicalMoments(mom[3],f);
StrainRate(S_f[3],mom[3],omega_f);
for(int i = 0; i<19; i++)
f[i] = f_f[f_memLR(i,xm,ym,zp,pitch_f,zInner_f)];
PhysicalMoments(mom[4],f);
StrainRate(S_f[4],mom[4],omega_f);
for(int i = 0; i<19; i++)
f[i] = f_f[f_memLR(i,xp,ym,zp,pitch_f,zInner_f)];
PhysicalMoments(mom[5],f);
StrainRate(S_f[5],mom[5],omega_f);
for(int i = 0; i<19; i++)
f[i] = f_f[f_memLR(i,xm,yp,zp,pitch_f,zInner_f)];
PhysicalMoments(mom[6],f);
StrainRate(S_f[6],mom[6],omega_f);
for(int i = 0; i<19; i++)
f[i] = f_f[f_memLR(i,xp,yp,zp,pitch_f,zInner_f)];
PhysicalMoments(mom[7],f);
StrainRate(S_f[7],mom[7],omega_f);
}
if(ORDER == 1){
for(int i = 0; i<9; i++)
mom[0][i] = 0.125f*(mom[0][i]+mom[1][i]+mom[2][i]+mom[3][i]+mom[4][i]+mom[5][i]+mom[6][i]+mom[7][i]);
}
else if(ORDER == 2)
{
float u_x1,u_x2,u_x3,u_x4,u_x5,u_x6,u_x7,u_x8;
float v_y1,v_y2,v_y3,v_y4,v_y5,v_y6,v_y7,v_y8;
float w_z1,w_z2,w_z3,w_z4,w_z5,w_z6,w_z7,w_z8;
float Sxy1,Sxy2,Sxy3,Sxy4,Sxy5,Sxy6,Sxy7,Sxy8;
float Syz1,Syz2,Syz3,Syz4,Syz5,Syz6,Syz7,Syz8;
float Sxz1,Sxz2,Sxz3,Sxz4,Sxz5,Sxz6,Sxz7,Sxz8;
u_x1=S_f[0][0];v_y1=S_f[0][1];w_z1=S_f[0][2];Sxy1=S_f[0][3];Syz1=S_f[0][4];Sxz1=S_f[0][5];
u_x2=S_f[1][0];v_y2=S_f[1][1];w_z2=S_f[1][2];Sxy2=S_f[1][3];Syz2=S_f[1][4];Sxz2=S_f[1][5];
u_x3=S_f[2][0];v_y3=S_f[2][1];w_z3=S_f[2][2];Sxy3=S_f[2][3];Syz3=S_f[2][4];Sxz3=S_f[2][5];
u_x4=S_f[3][0];v_y4=S_f[3][1];w_z4=S_f[3][2];Sxy4=S_f[3][3];Syz4=S_f[3][4];Sxz4=S_f[3][5];
u_x5=S_f[4][0];v_y5=S_f[4][1];w_z5=S_f[4][2];Sxy5=S_f[4][3];Syz5=S_f[4][4];Sxz5=S_f[4][5];
u_x6=S_f[5][0];v_y6=S_f[5][1];w_z6=S_f[5][2];Sxy6=S_f[5][3];Syz6=S_f[5][4];Sxz6=S_f[5][5];
u_x7=S_f[6][0];v_y7=S_f[6][1];w_z7=S_f[6][2];Sxy7=S_f[6][3];Syz7=S_f[6][4];Sxz7=S_f[6][5];
u_x8=S_f[7][0];v_y8=S_f[7][1];w_z8=S_f[7][2];Sxy8=S_f[7][3];Syz8=S_f[7][4];Sxz8=S_f[7][5];
float m03,m05,m07, m13,m15,m17, m23,m25,m27, m33,m35,m37, m43,m45,m47, m53,m55,m57, m63,m65,m67, m73,m75,m77;
m03=mom[0][1];m05=mom[0][2];m07=mom[0][3];
m13=mom[1][1];m15=mom[1][2];m17=mom[1][3];
m23=mom[2][1];m25=mom[2][2];m27=mom[2][3];
m33=mom[3][1];m35=mom[3][2];m37=mom[3][3];
m43=mom[4][1];m45=mom[4][2];m47=mom[4][3];
m53=mom[5][1];m55=mom[5][2];m57=mom[5][3];
m63=mom[6][1];m65=mom[6][2];m67=mom[6][3];
m73=mom[7][1];m75=mom[7][2];m77=mom[7][3];
float cx = -((u_x8-u_x7+u_x6-u_x5+u_x4-u_x3+u_x2-u_x1))*0.03125f;
float cy = -((Sxy8+Sxy7-Sxy6-Sxy5+Sxy4+Sxy3-Sxy2-Sxy1)-m75+m65+m55-m45-m35+m25+m15-m05)*0.0625f;
float cz = -((Sxz8+Sxz7+Sxz6+Sxz5-Sxz4-Sxz3-Sxz2-Sxz1)-m77+m67-m57+m47+m37-m27+m17-m07)*0.0625f;
float dx = -((Sxy8-Sxy7+Sxy6-Sxy5+Sxy4-Sxy3+Sxy2-Sxy1)-m73+m63+m53-m43-m33+m23+m13-m03)*0.0625f;
float dy = -((v_y8+v_y7-v_y6-v_y5+v_y4+v_y3-v_y2-v_y1))*0.03125f;
float dz = -((Syz8+Syz7+Syz6+Syz5-Syz4-Syz3-Syz2-Syz1)-m77-m67+m57+m47+m37+m27-m17-m07)*0.0625f;
float ex = -((Sxz8-Sxz7+Sxz6-Sxz5+Sxz4-Sxz3+Sxz2-Sxz1)-m73+m63-m53+m43+m33-m23+m13-m03)*0.0625f;
float ey = -((Syz8+Syz7-Syz6-Syz5+Syz4+Syz3-Syz2-Syz1)-m75-m65+m55+m45+m35+m25-m15-m05)*0.0625f;
float ez = -((w_z8+w_z7+w_z6+w_z5-w_z4-w_z3-w_z2-w_z1))*0.03125f;
for(int i = 0; i<9; i++)
mom[0][i] = 0.125f*(mom[0][i]+mom[1][i]+mom[2][i]+mom[3][i]+mom[4][i]+mom[5][i]+mom[6][i]+mom[7][i]);
float xpr = 0.f;//4.f*xf*xf-4.f*xf+1.f;
float ypr = 0.f;//4.f*yf*yf-4.f*yf+1.f;
float zpr = 0.f;//4.f*zf*zf-4.f*zf+1.f;
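//at the coarse node centre xf = yf = zf = 0.5, the commented bubble functions 4f*f-4f+1
//evaluate to 0, so only the constant part of the quadratic correction survives below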
mom[0][1] += cx*(1.f-xpr)+cy*(1.f-ypr)+cz*(1.f-zpr);
mom[0][2] += dx*(1.f-xpr)+dy*(1.f-ypr)+dz*(1.f-zpr);
mom[0][3] += ex*(1.f-xpr)+ey*(1.f-ypr)+ez*(1.f-zpr);
}
InvertPhysicalMoments(f,mom[0],SF);
int GPU = 0;
int im = ImageFcn(x,y,GPU*(zInner+2)+z);
if(im != 1 && im != 10){
if(z == 0){
for(int i = 0; i<19; i++)
g_c[buff_mem(i,x,y,pitch_c)]=f[i];
}
else if(z == zInner+1){
for(int i = 0; i<19; i++)
h_c[buff_mem(i,x,y,pitch_c)]=f[i];
}
else{
for(int i = 0; i<19; i++)
f_c[f_mem(i,x,y,z-1,pitch_c,zInner)]=f[i];
}
}
}//end extraction region
}
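//WriteResults dumps one GPU's coarse subdomain as a Tecplot POINT zone.
//Note: for the interior planes the last column written is Smag (the strain-rate
//magnitude), not vfluc as the VARIABLES header suggests; WriteResultsLR below
//does the same.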
void WriteResults(ostream &output, float *fin, float *gin, float *hin, float **velAv,
float **velFluc, float omega, int GPU_N, int GPU)
{
float f[19];
output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"velAv[0]\",\"velAv[1]\",\"ufluc\",\"vfluc\"\n";
output<<"ZONE F=POINT, I="<<XDIM<<", J="<<YDIM<<", K="<<ZDIM/GPU_N<<"\n";
for(int j = 0; j<YDIM; j++){
for(int i = 0; i<XDIM; i++){
float rho = 0;
for(int l = 0; l<19; l++){
f[l] = gin[(i+j*XDIM)+l *XDIM*YDIM];
rho += f[l];
}
float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17];
float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
output<<i<<", "<<j<<", "<<(ZDIM/GPU_N*GPU)<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<velAv[0][i+j*XDIM]<<","<<velAv[1][i+j*XDIM]<<", "<<velFluc[0][i+j*XDIM]<<","<<velFluc[1][i+j*XDIM]<<endl;
}}
for(int k = 1; k<ZDIM/GPU_N-1; k++){
for(int j = 0; j<YDIM; j++){
for(int i = 0; i<XDIM; i++){
float rho = 0;
for(int l = 0; l<19; l++){
f[l] = fin[(i+j*XDIM)+(k-1)*XDIM*YDIM+l*XDIM*YDIM*(ZDIM/GPU_N-2)];
rho += f[l];
}
float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17];
float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
//the full moment set is computed below via Moments_host
float m[19] = {0};
Moments_host(f,m);
float omega = 1.0f/(3.0f*(UMAX*OBSTR1*2.f/RE)+0.5f);
//float omega2 = 2.0f/(1.0f+2.0f*(2.0f/omega-1.0f));
float PI11 = -0.026315789f*m[ 1]-0.5f *omega*m[ 9];
float PI22 = -0.026315789f*m[ 1]+0.25f*omega*(m[ 9]-3.0f*m[11]);
float PI33 = -0.026315789f*m[ 1]+0.25f*omega*(m[ 9]+3.0f*m[11]);
float PI12 = -1.5f*omega*m[13];
float PI23 = -1.5f*omega*m[14];
float PI13 = -1.5f*omega*m[15];
//we know Smag on coarse mesh
float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
//InvertMoments_host(f,m);
//u = m[3];
//v = m[5];
//w = m[7];
//m6 = m[6 ];
//m10= m[10];
//m16= m[16];
int z = (ZDIM/GPU_N*GPU+k);
output<<i<<", "<<j<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<velAv[0][i+j*XDIM+k*XDIM*YDIM]<<","<<velAv[1][i+j*XDIM+k*XDIM*YDIM]<<", "
<<velFluc[0][i+j*XDIM+k*XDIM*YDIM]<<","<<Smag<<endl;
//<<velFluc[0][i+j*XDIM+k*XDIM*YDIM]<<","<<velFluc[1][i+j*XDIM+k*XDIM*YDIM]<<endl;
}}}
for(int j = 0; j<YDIM; j++){
for(int i = 0; i<XDIM; i++){
float rho = 0;
for(int l = 0; l<19; l++){
f[l] = hin[(i+j*XDIM)+l *XDIM*YDIM];
rho += f[l];
}
float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17];
float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
output<<i<<", "<<j<<", "<<(ZDIM/GPU_N*(GPU+1)-1)<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<velAv[0][i+j*XDIM+(ZDIM/GPU_N-1)*XDIM*YDIM]<<","<<velAv[1][i+j*XDIM+(ZDIM/GPU_N-1)*XDIM*YDIM]<<", "
<<velFluc[0][i+j*XDIM+(ZDIM/GPU_N-1)*XDIM*YDIM]<<","<<velFluc[1][i+j*XDIM+(ZDIM/GPU_N-1)*XDIM*YDIM]<<endl;
}}
}
void WriteResultsLR(ofstream &output, float *fin, float *gin, float *hin, float **velAv,
float **velFluc, float omega, int GPU_N, int GPU)
{
float f[19];
output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"velAv[0]\",\"velAv[1]\",\"ufluc\",\"vfluc\"\n";
output<<"ZONE F=POINT, I="<<XLRDIM<<", J="<<YLRDIM<<", K="<<ZLRDIM/GPU_N<<"\n";
for(int j = 0; j<YLRDIM; j++){
for(int i = 0; i<XLRDIM; i++){
float rho = 0;
for(int l = 0; l<19; l++){
f[l] = gin[(i+j*XLRDIM)+l *XLRDIM*YLRDIM];
rho += f[l];
}
float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17];
float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
float x = LRX0+LRFACTOR*i;
float y = LRY0+LRFACTOR*j;
float z = LRZ0+LRFACTOR*(ZLRDIM/GPU_N*GPU);
output<<x<<", "<<y<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<velAv[0][i+j*XLRDIM]<<","<<velAv[1][i+j*XLRDIM]<<", "<<velFluc[0][i+j*XLRDIM]<<","<<velFluc[1][i+j*XLRDIM]<<endl;
}}
for(int k = 1; k<ZLRDIM/GPU_N-1; k++){
for(int j = 0; j<YLRDIM; j++){
for(int i = 0; i<XLRDIM; i++){
float rho = 0;
for(int l = 0; l<19; l++){
f[l] = fin[(i+j*XLRDIM)+(k-1)*XLRDIM*YLRDIM+l*XLRDIM*YLRDIM*(ZLRDIM/GPU_N-2)];
rho += f[l];
}
float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17];
float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
float x = LRX0+LRFACTOR*i;
float y = LRY0+LRFACTOR*j;
float z = LRZ0+LRFACTOR*(ZLRDIM/GPU_N*GPU+k);
float m[19] = {0};
Moments_host(f,m);
float omega = 1.0f/(3.0f*(UMAX*OBSTR1*2.f/RE)+0.5f);
//float omega2 = 2.0f/(1.0f+2.0f*(2.0f/omega-1.0f));
float PI11 = -0.026315789f*m[ 1]-0.5f *omega*m[ 9];
float PI22 = -0.026315789f*m[ 1]+0.25f*omega*(m[ 9]-3.0f*m[11]);
float PI33 = -0.026315789f*m[ 1]+0.25f*omega*(m[ 9]+3.0f*m[11]);
float PI12 = -1.5f*omega*m[13];
float PI23 = -1.5f*omega*m[14];
float PI13 = -1.5f*omega*m[15];
//we know Smag on coarse mesh
float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
output<<x<<", "<<y<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<velAv [0][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<velAv [1][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<", "
<<velFluc[0][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<Smag<<endl;
//<<velFluc[0][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<velFluc[1][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<endl;
}}}
for(int j = 0; j<YLRDIM; j++){
for(int i = 0; i<XLRDIM; i++){
float rho = 0;
for(int l = 0; l<19; l++){
f[l] = hin[(i+j*XLRDIM)+l *XLRDIM*YLRDIM];
rho += f[l];
}
float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17];
float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
float x = LRX0+LRFACTOR*i;
float y = LRY0+LRFACTOR*j;
float z = LRZ0+LRFACTOR*(ZLRDIM/GPU_N*(GPU+1)-1);
output<<x<<", "<<y<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<velAv[0][i+j*XLRDIM+(ZLRDIM/GPU_N-1)*XLRDIM*YLRDIM]<<","<<velAv[1][i+j*XLRDIM+(ZLRDIM/GPU_N-1)*XLRDIM*YLRDIM]<<", "
<<velFluc[0][i+j*XLRDIM+(ZLRDIM/GPU_N-1)*XLRDIM*YLRDIM]<<","<<velFluc[1][i+j*XLRDIM+(ZLRDIM/GPU_N-1)*XLRDIM*YLRDIM]<<endl;
}}
}
void WriteForces(float **F, ofstream &output, int ForceTime, int level)
{
float ref = UMAX*UMAX*ZDIM*OBSTR1;
if(level > 0)
ref *= LRLEVEL*LRLEVEL;
for(int i = 0; i<ForceTime; i++){
output<<i+STARTF<<", "<<F[0][i]/ref<<", "<<F[1][i]/ref<<", "<<F[2][i]/ref<<endl;
}
}
void WriteInputs(ostream &output, float omega, float omegaLR, int GPU_per_node)
{
output<<"Base domain size \t"<<XDIM<<"x"<<YDIM<<"x"<<ZDIM<<endl;
output<<"Base blocksize: \t"<<BLOCKSIZEX<<"x"<<BLOCKSIZEY<<"x"<<BLOCKSIZEZ<<endl;
output<<"Obst1 location: \t("<<OBSTX1<<","<<OBSTY1<<","<<OBSTZ1<<")"<<endl;
output<<"Obst1 radius: \t"<<OBSTR1<<endl;
output<<"Obst2 location: \t("<<OBSTX2<<","<<OBSTY2<<","<<OBSTZ2<<")"<<endl;
output<<"Obst2 radius: \t"<<OBSTR2<<endl;
output<<"RE: \t"<<RE<<endl;
output<<"UMAX: \t"<<UMAX<<endl;
output<<"omega \t: "<<omega<<endl;
output<<"TMAX: \t"<<TMAX<<endl;
output<<"STARTF: \t"<<STARTF<<endl;
output<<"START_VELAV: \t"<<START_VELAV<<endl;
output<<"START_VELFLUC: \t"<<START_VELFLUC<<endl;
output<<"REFINEMENT: \t"<<REFINEMENT<<endl;
output<<"MODEL: \t"<<MODEL<<endl;
output<<"Smagorinsky LES: \t"<<SmagLES<<endl;
output<<"CS: \t"<<CS<<endl;
output<<"LR domain size \t"<<XLRDIM<<"x"<<YLRDIM<<"x"<<ZLRDIM<<endl;
output<<"LR factor \t"<<LRFACTOR<<endl;
output<<"LR location \t"<<LRX0<<"x"<<LRY0<<"x"<<LRZ0<<endl;
output<<"LR blocksize: \t"<<BLOCKSIZELRX<<"x"<<BLOCKSIZELRY<<"x"<<BLOCKSIZELRZ<<endl;
output<<"omega in LR \t: "<<omegaLR<<endl;
output<<"GPUs per node \t: "<<GPU_per_node<<endl;
}
int main(int argc, char *argv[])
{
int GPU_N; hipGetDeviceCount(&GPU_N);
cout<<"number of GPUs: "<<GPU_N<<endl;
ofstream output; ofstream outputForce; ofstream outputInputs;
string FileName = CASENAME;
output.open ((FileName+".dat").c_str());
outputForce.open ((FileName+".force").c_str());
outputInputs.open ((FileName+".inputs").c_str());
//size_t memsize, memsize2;
//round the row pitch up to the next power of two (in elements), then convert to bytes
size_t pitch = 2;
while(pitch<XDIM)
pitch=pitch*2;
pitch *= sizeof(float);
size_t pitch_e = pitch/sizeof(float);
cout<<"Pitch (in elements): "<<pitch/sizeof(float)<<endl;
float CharLength = OBSTR1*2.f;
float omega = 1.0f/(3.0f*(UMAX*CharLength/RE)+0.5f);
float omegaLR = 2.0f/(1.0f+2.0f*(2.0f/omega-1.0f));
if(LRFACTOR == 0.25f){
omegaLR = 2.0f/(1.0f+2.0f*(2.0f/omegaLR-1.0f));
}
if(LRFACTOR == 0.125f){
omegaLR = 2.0f/(1.0f+2.0f*(2.0f/omegaLR-1.0f));
omegaLR = 2.0f/(1.0f+2.0f*(2.0f/omegaLR-1.0f));
}
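//each additional factor-of-two refinement applies the nested-grid relaxation
//relation omegaLR = 2/(1+2*(2/omega-1)) once more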
float SF_cf = omega*(1.0f-omegaLR)/((1.0f-omega)*omegaLR/LRFACTOR);
float SF_fc = 1.f/SF_cf;
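//SF_cf and SF_fc rescale the non-equilibrium moments when transferring data
//coarse->fine and fine->coarse between grid levels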
cout<<SF_cf<<endl;
WriteInputs(outputInputs,omega,omegaLR,GPU_N);
WriteInputs(cout,omega,omegaLR,GPU_N);
if(fabs(LRFACTOR-1.f/LRLEVEL)>0.001f && REFINEMENT == 1){
cout<<"LRLEVEL and LRFACTOR don't match! Exiting..."<<endl;
return 0;
}
int zInner = ZDIM/GPU_N-2; //excluding halo
int ForceTime = max(0,TMAX-STARTF);
dim3 threads(BLOCKSIZEX, BLOCKSIZEY, BLOCKSIZEZ);
//2 halo layers per GPU (for 2 GPUs)
dim3 grid (((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),(zInner)/BLOCKSIZEZ);
dim3 g_grid(((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),1);
hipStream_t stream_halo[GPU_N];
hipStream_t stream_inner[GPU_N];
//data pointers as 3D array (GPUxCoord)
float *f_h[GPU_N], *g_h[GPU_N], *h_h[GPU_N];
float *f_d[GPU_N][2], *g_d[GPU_N][2], *h_d[GPU_N][2];
float *g_temp[GPU_N], *h_temp[GPU_N];
float *F_h[GPU_N][3];
float *F_d[GPU_N][3];
float *F_total[3];
float *velAv_h[GPU_N][3],*velFluc_h[GPU_N][3];
float *velAv_d[GPU_N][3],*velFluc_d[GPU_N][3];
for(int i = 0; i<3; i++)
F_total[i] = (float *)malloc(ForceTime*sizeof(float));
for(int i=0;i<3;i++)
for(int j=0;j<(ForceTime);j++)
F_total[i][j] = 0;
//Malloc and Initialize for each GPU
for(int n = 0; n<GPU_N; n++){
f_h [n] = (float *)malloc(XDIM*YDIM*zInner*19*sizeof(float));
g_h [n] = (float *)malloc(XDIM*YDIM* 19*sizeof(float));
h_h [n] = (float *)malloc(XDIM*YDIM* 19*sizeof(float));
for(int i = 0; i<3; i++){
F_h [n][i] = (float *)malloc(ForceTime*sizeof(float));
velAv_h [n][i] = (float *)malloc(XDIM*YDIM*ZDIM/GPU_N*sizeof(float));
velFluc_h[n][i] = (float *)malloc(XDIM*YDIM*ZDIM/GPU_N*sizeof(float));
}
hipSetDevice(n);
hipStreamCreate(&stream_halo[n]);
hipStreamCreate(&stream_inner[n]);
for(int m = 0; m<GPU_N; m++)
if(m != n) hipDeviceEnablePeerAccess(m,0);
for(int i = 0; i<2; i++){
hipMalloc((void **) &f_d[n][i], pitch_e*YDIM*zInner*19*sizeof(float));
hipMalloc((void **) &g_d[n][i], pitch_e*YDIM* 19*sizeof(float));
hipMalloc((void **) &h_d[n][i], pitch_e*YDIM* 19*sizeof(float));
}
hipMalloc((void **) & g_temp[n], pitch_e*YDIM* 19*sizeof(float));
hipMalloc((void **) & h_temp[n], pitch_e*YDIM* 19*sizeof(float));
for(int i = 0; i<3; i++){
hipMalloc((void **) & F_d [n][i], (ForceTime)*sizeof(float));
hipMalloc((void **) & velAv_d [n][i], pitch_e*YDIM*ZDIM/GPU_N*sizeof(float));
hipMalloc((void **) & velFluc_d[n][i], pitch_e*YDIM*ZDIM/GPU_N*sizeof(float));
}
//initialize host f_inner
for (int i = 0; i < XDIM*YDIM*zInner*19; i++)
f_h[n][i] = 0;
//initialize host g,h
for (int i = 0; i < XDIM*YDIM*19; i++){
g_h[n][i] = 0;
h_h[n][i] = 0;
}
for(int i=0;i<3;i++){
for(int j=0;j<(ForceTime);j++)
F_h[n][i][j] = 0;
for (int j = 0; j < XDIM*YDIM*ZDIM/GPU_N; j++){
velAv_h [n][i][j] = 0;
velFluc_h[n][i][j] = 0;
}
}
for(int i = 0; i<2; i++){
hipMemcpy2D(f_d[n][i],pitch,f_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*zInner*19,hipMemcpyHostToDevice);
hipMemcpy2D(g_d[n][i],pitch,g_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM *19,hipMemcpyHostToDevice);
hipMemcpy2D(h_d[n][i],pitch,h_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM *19,hipMemcpyHostToDevice);
}
for(int i = 0; i<3; i++){
hipMemcpy2D(velAv_d [n][i],pitch,velAv_h [n][i],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*ZDIM/GPU_N,hipMemcpyHostToDevice);
hipMemcpy2D(velFluc_d[n][i],pitch,velFluc_h[n][i],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*ZDIM/GPU_N,hipMemcpyHostToDevice);
hipMemcpy(F_d[n][i],F_h[n][i],sizeof(float)*(ForceTime),hipMemcpyHostToDevice);
}
//initialization kernels
for(int i = 0; i<2; i++){
hipLaunchKernelGGL(( initialize), dim3(grid),dim3(threads), 0, 0, f_d[n][i],pitch_e,zInner,GPU_N);
hipLaunchKernelGGL(( initialize), dim3(g_grid),dim3(threads), 0, 0, g_d[n][i],pitch_e, 1,GPU_N);
hipLaunchKernelGGL(( initialize), dim3(g_grid),dim3(threads), 0, 0, h_d[n][i],pitch_e, 1,GPU_N);
}
hipLaunchKernelGGL(( initialize), dim3(g_grid),dim3(threads), 0, 0, g_temp[n],pitch_e, 1,GPU_N);
hipLaunchKernelGGL(( initialize), dim3(g_grid),dim3(threads), 0, 0, h_temp[n],pitch_e, 1,GPU_N);
}//end Malloc and Initialize
//data pointers as 3D array (GPUxCoord)
float *f_LR_h[GPU_N], *g_LR_h[GPU_N], *h_LR_h[GPU_N];
float *f_LR_d[GPU_N][2], *g_LR_d[GPU_N][2], *h_LR_d[GPU_N][2];
float *g_LR_temp[GPU_N], *h_LR_temp[GPU_N];
float *velAv_LR_h[GPU_N][3],*velFluc_LR_h[GPU_N][3];
float *velAv_LR_d[GPU_N][3],*velFluc_LR_d[GPU_N][3];
float *f_interp[GPU_N], *g_interp[GPU_N], *h_interp[GPU_N], *g_interp_temp[GPU_N], *h_interp_temp[GPU_N];
float *interp_h[GPU_N];
size_t pitchLR = 2;
while(pitchLR<XLRDIM)
pitchLR=pitchLR*2;
pitchLR = pitchLR*sizeof(float);
size_t pitchLR_e = pitchLR/sizeof(float);
cout<<"LR Pitch (in elements): "<<pitchLR_e<<endl;
size_t pitchInterp = 2;
while(pitchInterp<XLRDIM*LRFACTOR+1)
pitchInterp=pitchInterp*2;
pitchInterp = pitchInterp*sizeof(float);
size_t pitchInterp_e = pitchInterp/sizeof(float);
cout<<"Interp Pitch (in elements): "<<pitchInterp_e<<endl;
int zLRInner = ZLRDIM/GPU_N-2;
dim3 LR_threads(BLOCKSIZELRX, BLOCKSIZELRY, BLOCKSIZELRZ);
dim3 LR_grid(((XLRDIM+BLOCKSIZELRX-1)/BLOCKSIZELRX),((YLRDIM+BLOCKSIZELRY-1)/BLOCKSIZELRY),(zLRInner)/BLOCKSIZELRZ);
dim3 g_LR_grid(((XLRDIM+BLOCKSIZELRX-1)/BLOCKSIZELRX),((YLRDIM+BLOCKSIZELRY-1)/BLOCKSIZELRY),1);
dim3 Interp_threads(BLOCKSIZEINTERP, LRLEVEL, LRLEVEL);
dim3 Interp_grid(((XLRDIM+BLOCKSIZEINTERP-1)/BLOCKSIZEINTERP),((YLRDIM+LRLEVEL-1)/LRLEVEL),ZLRDIM/LRLEVEL/GPU_N);
cout<<((XLRDIM+BLOCKSIZEINTERP-1)/BLOCKSIZEINTERP)<<", "<<((YLRDIM+LRLEVEL-1)/LRLEVEL)<<", "<<ZLRDIM/LRLEVEL/GPU_N<<endl;
dim3 Interp_grid_c(((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),(ZDIM/GPU_N)/BLOCKSIZEZ);
//setup LR
if(REFINEMENT == 1){
for(int n = 0; n<GPU_N; n++){
f_LR_h [n] = (float *)malloc(XLRDIM*YLRDIM*zLRInner*19*sizeof(float));
g_LR_h [n] = (float *)malloc(XLRDIM*YLRDIM* 19*sizeof(float));
h_LR_h [n] = (float *)malloc(XLRDIM*YLRDIM* 19*sizeof(float));
interp_h [n] = (float *)malloc((XLRDIM*LRFACTOR+1)*(YLRDIM*LRFACTOR+1)*zInner*9*sizeof(float));
for(int i = 0; i<3; i++){
velAv_LR_h [n][i] = (float *)malloc(XLRDIM*YLRDIM*ZLRDIM/GPU_N*sizeof(float));
velFluc_LR_h[n][i] = (float *)malloc(XLRDIM*YLRDIM*ZLRDIM/GPU_N*sizeof(float));
}
hipSetDevice(n);
for(int i = 0; i<2; i++){
hipMalloc((void **) &f_LR_d[n][i], pitchLR_e*YLRDIM*zLRInner*19*sizeof(float));
hipMalloc((void **) &g_LR_d[n][i], pitchLR_e*YLRDIM* 19*sizeof(float));
hipMalloc((void **) &h_LR_d[n][i], pitchLR_e*YLRDIM* 19*sizeof(float));
}
hipMalloc((void **) & g_LR_temp[n], pitchLR_e*YLRDIM* 19*sizeof(float));
hipMalloc((void **) & h_LR_temp[n], pitchLR_e*YLRDIM* 19*sizeof(float));
hipMalloc((void **) & f_interp[n], pitchInterp_e*(YLRDIM*LRFACTOR+1)*zInner*9*sizeof(float));
hipMalloc((void **) & g_interp[n], pitchInterp_e*(YLRDIM*LRFACTOR+1)*9*sizeof(float));
hipMalloc((void **) & h_interp[n], pitchInterp_e*(YLRDIM*LRFACTOR+1)*9*sizeof(float));
hipMalloc((void **) & g_interp_temp[n], pitchInterp_e*(YLRDIM*LRFACTOR+1)*9*sizeof(float));
hipMalloc((void **) & h_interp_temp[n], pitchInterp_e*(YLRDIM*LRFACTOR+1)*9*sizeof(float));
for(int i = 0; i<3; i++){
hipMalloc((void **) & velAv_LR_d [n][i], pitchLR_e*YLRDIM*ZLRDIM/GPU_N*sizeof(float));
hipMalloc((void **) & velFluc_LR_d[n][i], pitchLR_e*YLRDIM*ZLRDIM/GPU_N*sizeof(float));
}
for (int i = 0; i < XLRDIM*YLRDIM*zLRInner*19; i++)
f_LR_h[n][i] = 0;
//initialize host g,h
for (int i = 0; i < XLRDIM*YLRDIM*19; i++){
g_LR_h[n][i] = 0;
h_LR_h[n][i] = 0;
}
for(int i=0;i<3;i++){
for (int j = 0; j < XLRDIM*YLRDIM*ZLRDIM/GPU_N; j++){
velAv_LR_h [n][i][j] = 0;
velFluc_LR_h[n][i][j] = 0;
}
}
for(int i = 0; i<2; i++){
hipMemcpy2D(f_LR_d[n][i],pitchLR,f_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*zLRInner*19,hipMemcpyHostToDevice);
hipMemcpy2D(g_LR_d[n][i],pitchLR,g_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM *19,hipMemcpyHostToDevice);
hipMemcpy2D(h_LR_d[n][i],pitchLR,h_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM *19,hipMemcpyHostToDevice);
}
for(int i = 0; i<3; i++){
hipMemcpy2D(velAv_LR_d [n][i],pitchLR,velAv_LR_h [n][i],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,hipMemcpyHostToDevice);
hipMemcpy2D(velFluc_LR_d[n][i],pitchLR,velFluc_LR_h[n][i],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,hipMemcpyHostToDevice);
}
//initialization kernels
for(int i = 0; i<2; i++){
hipLaunchKernelGGL(( initializeLR), dim3(LR_grid),dim3(LR_threads), 0, 0, f_LR_d[n][i],pitchLR_e,zLRInner,GPU_N);
hipLaunchKernelGGL(( initializeLR), dim3(g_LR_grid),dim3(LR_threads), 0, 0, g_LR_d[n][i],pitchLR_e, 1,GPU_N);
hipLaunchKernelGGL(( initializeLR), dim3(g_LR_grid),dim3(LR_threads), 0, 0, h_LR_d[n][i],pitchLR_e, 1,GPU_N);
}
hipLaunchKernelGGL(( initializeLR), dim3(g_LR_grid),dim3(LR_threads), 0, 0, g_LR_temp[n],pitchLR_e, 1,GPU_N);
hipLaunchKernelGGL(( initializeLR), dim3(g_LR_grid),dim3(LR_threads), 0, 0, h_LR_temp[n],pitchLR_e, 1,GPU_N);
}//end of GPU loop for malloc and initialize for LR
}//end of LR malloc and initialize
hipFuncSetCacheConfig(InterpCF,hipFuncCachePreferShared);
int A = 0; int B = 1; int C = 0; int D = 1;
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
size_t mem_avail, mem_total;
hipMemGetInfo(&mem_avail,&mem_total);
cout<<"Device memory used for dev"<<n<<" : "<<(mem_total-mem_avail)*pow(10,-9)<<" GB\n";
cout<<"Device memory available for dev"<<n<<" : "<<(mem_avail)*pow(10,-9)<<" GB\n";
}
struct timeval tdr0,tdr1;
double restime;
hipDeviceSynchronize();
gettimeofday (&tdr0,NULL);
//time loop
for(int t = 0; t<TMAX; t++)
{
//copy temporary arrays for top and bottom of coarse mesh to neighboring GPUs. Only transferring the 5 distributions that cross the interface
for(int n = 0; n<GPU_N; n++)
hipMemcpyPeerAsync(&h_temp[n][pitch_e*YDIM*14],n,&g_d[ (n+1)%GPU_N][A][pitch_e*YDIM*14], (n+1)%GPU_N,pitch_e*YDIM*sizeof(float)*5,stream_halo[n]);
for(int n = 0; n<GPU_N; n++)
hipMemcpyPeerAsync(&g_temp[n][pitch_e*YDIM*9],n,&h_d[abs(n-1)%GPU_N][A][pitch_e*YDIM*9],abs(n-1)%GPU_N,pitch_e*YDIM*sizeof(float)*5,stream_halo[n]);
//compute inner nodes on coarse mesh
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipLaunchKernelGGL(( update_inn), dim3(grid),dim3(threads),0,stream_inner[n], f_d[n][B],f_d[n][A],g_d[n][A], h_d[n][A],omega,pitch_e,n,zInner,velAv_d[n][0],velAv_d[n][1],velFluc_d[n][0],velFluc_d[n][1],F_d[n][0],F_d[n][1],F_d[n][2],t,(!REFINEMENT&&t>STARTF),f_interp[n],pitchInterp_e);
}
//synchronize halo stream before computing top and bottom nodes
for(int n = 0; n<GPU_N; n++)
hipStreamSynchronize(stream_halo[n]);
//compute top and bottom nodes
for(int n = 0; n<GPU_N; n++)
{
hipSetDevice(n);
hipLaunchKernelGGL(( update_top), dim3(g_grid), dim3(threads), 0, stream_halo [n], h_d[n][B],h_d[n][A],f_d[n][A],h_temp[n],omega,pitch_e,n,zInner,F_d[n][0],F_d[n][1],F_d[n][2],t,(!REFINEMENT&&t>STARTF),h_interp[n],pitchInterp_e);
hipLaunchKernelGGL(( update_bot), dim3(g_grid), dim3(threads), 0, stream_halo [n], g_d[n][B],g_d[n][A],f_d[n][A],g_temp[n],omega,pitch_e,n,zInner,F_d[n][0],F_d[n][1],F_d[n][2],t,(!REFINEMENT&&t>STARTF),g_interp[n],pitchInterp_e);
}
//hipDeviceSynchronize();
swap(A,B);
if(REFINEMENT == 1){
int flag_F = 0;
for(int i = 0; i<LRLEVEL; i++){
if(t>STARTF && i == 0) flag_F = 1;
else flag_F = 0;
for(int n = 0; n<GPU_N; n++){
hipMemcpyPeerAsync(&h_LR_temp[n][pitchLR_e*YLRDIM*14],n,&g_LR_d[ (n+1)%GPU_N][C][pitchLR_e*YLRDIM*14], (n+1)%GPU_N,pitchLR_e*YLRDIM*sizeof(float)*5,stream_halo[n]);
hipMemcpyPeerAsync(&g_LR_temp[n][pitchLR_e*YLRDIM*9 ],n,&h_LR_d[abs(n-1)%GPU_N][C][pitchLR_e*YLRDIM*9 ],abs(n-1)%GPU_N,pitchLR_e*YLRDIM*sizeof(float)*5,stream_halo[n]);
}
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipLaunchKernelGGL(( update_inn_LR), dim3(LR_grid),dim3(LR_threads),0,stream_inner[n], f_LR_d[n][D],f_LR_d[n][C],g_LR_d[n][C], h_LR_d[n][C],omegaLR,pitchLR_e,n,zLRInner,velAv_LR_d[n][0],velAv_LR_d[n][1],velFluc_LR_d[n][0],velFluc_LR_d[n][1],F_d[n][0],F_d[n][1],F_d[n][2],t,flag_F);
}
for(int n = 0; n<GPU_N; n++)
hipStreamSynchronize(stream_halo[n]);
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipLaunchKernelGGL(( update_top_LR), dim3(g_LR_grid),dim3(LR_threads),0,stream_halo[n], h_LR_d[n][D],h_LR_d[n][C],f_LR_d[n][C],h_LR_temp[n],omegaLR,pitchLR_e,n,zLRInner,F_d[n][0],F_d[n][1],F_d[n][2],t,flag_F);
hipLaunchKernelGGL(( update_bot_LR), dim3(g_LR_grid),dim3(LR_threads),0,stream_halo[n], g_LR_d[n][D],g_LR_d[n][C],f_LR_d[n][C],g_LR_temp[n],omegaLR,pitchLR_e,n,zLRInner,F_d[n][0],F_d[n][1],F_d[n][2],t,flag_F);
}
if(i == LRLEVEL-1)
{
//for(int n = 0; n<GPU_N; n++)
// hipMemcpyPeerAsync(&h_interp_temp[n][0],n,&g_interp[ (n+1)%GPU_N][0], (n+1)%GPU_N,pitchInterp_e*(YLRDIM*LRFACTOR+1)*sizeof(float)*9,stream_halo[n]);
for(int n = 0; n<GPU_N; n++)
hipMemcpyPeerAsync(&g_interp_temp[n][0],n,&h_interp[abs(n-1)%GPU_N][0],abs(n-1)%GPU_N,pitchInterp_e*(YLRDIM*LRFACTOR+1)*sizeof(float)*9,stream_halo[n]);
}
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipDeviceSynchronize();
}
flag_F = 0;
swap(C,D);
}
//interp from coarse grid
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipLaunchKernelGGL(( InterpCF), dim3(Interp_grid),dim3(Interp_threads),0,stream_inner[n], f_LR_d[n][C],g_LR_d[n][C],h_LR_d[n][C],pitchLR_e,f_interp[n],g_interp[n],h_interp[n],g_interp_temp[n],pitchInterp_e,SF_cf,omega,zInner,zLRInner);
//hipDeviceSynchronize();
}
//interp from fine grid
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipMemcpyPeerAsync(&h_LR_temp[n][0],n,&g_LR_d[ (n+1)%GPU_N][C][0], (n+1)%GPU_N,pitchLR_e*YLRDIM*sizeof(float)*19,stream_halo[n]);
}
for(int n = 0; n<GPU_N; n++)
hipStreamSynchronize(stream_halo[n]);
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipLaunchKernelGGL(( InterpFC), dim3(Interp_grid_c),dim3(threads),0,stream_halo[n], f_d[n][A],g_d[n][A],h_d[n][A],f_LR_d[n][C],h_LR_d[n][C],h_LR_temp[n],pitch_e,pitchLR_e,SF_fc,omegaLR,zInner,zLRInner);
}
}//end refinement
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipDeviceSynchronize();
}
}//end time loop
hipDeviceSynchronize();
gettimeofday (&tdr1,NULL);
timeval_subtract (&restime, &tdr1, &tdr0);
int Nodes;
Nodes = XDIM*YDIM*ZDIM;
if (REFINEMENT == 1)
Nodes += XLRDIM*YLRDIM*ZLRDIM*LRLEVEL;
cout<<"Time taken for main kernel: "<<restime<<" ("
<<double(Nodes*double(TMAX/1000000.f))/restime<<" MLUPS)\n";
//D2H Memcpy and write results
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipMemcpy2D(f_h[n],XDIM*sizeof(float),f_d[n][A],pitch,XDIM*sizeof(float),YDIM*zInner*19,hipMemcpyDeviceToHost);
hipMemcpy2D(g_h[n],XDIM*sizeof(float),g_d[n][A],pitch,XDIM*sizeof(float),YDIM *19,hipMemcpyDeviceToHost);
hipMemcpy2D(h_h[n],XDIM*sizeof(float),h_d[n][A],pitch,XDIM*sizeof(float),YDIM *19,hipMemcpyDeviceToHost);
for(int i = 0; i<3; i++){
hipMemcpy2D( velAv_h[n][i],XDIM*sizeof(float),velAv_d[n][i],pitch,XDIM*sizeof(float),YDIM*ZDIM/GPU_N,hipMemcpyDeviceToHost);
hipMemcpy2D(velFluc_h[n][i],XDIM*sizeof(float),velFluc_d[n][i],pitch,XDIM*sizeof(float),YDIM*ZDIM/GPU_N,hipMemcpyDeviceToHost);
hipMemcpy(F_h[n][i],F_d[n][i],sizeof(float)*ForceTime,hipMemcpyDeviceToHost);
}
WriteResults(output,f_h[n],g_h[n],h_h[n],velAv_h[n],velFluc_h[n],omega,GPU_N,n);
output<<endl;
for(int i=0;i<3;i++)
for(int j=0;j<ForceTime;j++)
F_total[i][j] += F_h[n][i][j];
for(int i = 0; i<2; i++){
hipFree(f_d[n][i]);
hipFree(g_d[n][i]);
hipFree(h_d[n][i]);
}
hipFree(g_temp[n]);
hipFree(h_temp[n]);
for(int i=0;i<3;i++)
hipFree(F_d[n][i]);
}//end Memcpy and write results
WriteForces(F_total,outputForce,ForceTime,REFINEMENT*LRLEVEL);
if(REFINEMENT == 1){
// output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"uAv\",\"vAv\",\"ufluc\",\"vfluc\"\n";
// output<<"ZONE F=POINT, I="<<XLRDIM<<", J="<<YLRDIM<<", K="<<ZLRDIM<<"\n";
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipMemcpy2D(f_LR_h[n],XLRDIM*sizeof(float),f_LR_d[n][C],pitchLR,XLRDIM*sizeof(float),YLRDIM*zLRInner*19,hipMemcpyDeviceToHost);
hipMemcpy2D(g_LR_h[n],XLRDIM*sizeof(float),g_LR_d[n][C],pitchLR,XLRDIM*sizeof(float),YLRDIM *19,hipMemcpyDeviceToHost);
hipMemcpy2D(h_LR_h[n],XLRDIM*sizeof(float),h_LR_d[n][C],pitchLR,XLRDIM*sizeof(float),YLRDIM *19,hipMemcpyDeviceToHost);
//hipMemcpy2D(interp_h[n],(XLRDIM*LRFACTOR+1)*sizeof(float),f_interp[n],pitchInterp,(XLRDIM*LRFACTOR+1)*sizeof(float),(YLRDIM*LRFACTOR+1)*zInner*9,hipMemcpyDeviceToHost);
for(int i = 0; i<3; i++){
hipMemcpy2D( velAv_LR_h[n][i],XLRDIM*sizeof(float),velAv_LR_d[n][i],pitchLR,XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,hipMemcpyDeviceToHost);
hipMemcpy2D(velFluc_LR_h[n][i],XLRDIM*sizeof(float),velFluc_LR_d[n][i],pitchLR,XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,hipMemcpyDeviceToHost);
}
WriteResultsLR(output,f_LR_h[n],g_LR_h[n],h_LR_h[n],velAv_LR_h[n],velFluc_LR_h[n],omegaLR,GPU_N,n);
output<<endl;
for(int i = 0; i<2; i++){
hipFree(f_LR_d[n][i]);
hipFree(g_LR_d[n][i]);
hipFree(h_LR_d[n][i]);
}
hipFree(g_LR_temp[n]);
hipFree(h_LR_temp[n]);
}
}
return 0;
}
|
a208e719bf00fc6487d5dacba417961933e61b9c.cu
|
#include <cuda.h>
#include <iostream>
#include <ostream>
#include <fstream>
#include <sys/time.h>
#include <time.h>
using namespace std;
#define CASENAME "LR1_o2"
#define BLOCKSIZEX 64
#define BLOCKSIZEY 1
#define BLOCKSIZEZ 1
#define BLOCKSIZELRX 64
#define BLOCKSIZELRY 1
#define BLOCKSIZELRZ 1
#define BLOCKSIZEINTERP 8
#define XDIM 251
#define YDIM 43
#define ZDIM 43
#define TMAX 20000
#define STARTF 0
#define OBSTR1 5.f
#define OBSTX1 50.5f
#define OBSTY1 20.5f
#define OBSTZ1 32.5f
#define OBSTR2 32.f
#define OBSTX2 319.5f
#define OBSTY2 511.5f
#define OBSTZ2 31.5f
#define LRFACTOR 0.5f
#define LRLEVEL 2
#define LRX0 30.25f //minimum x coord of LR
#define XLRDIM 128 //number of nodes in x
#define LRY0 10.25f
#define YLRDIM 42
#define LRZ0 -0.75f
#define ZLRDIM 86
#define ORDER 1 //order of accuracy of interpolation
//#define LRFACTOR 0.25f
//#define LRLEVEL 4
//#define LRX0 30.125f //minimum x coord of LR
//#define XLRDIM 256 //number of nodes in x
//#define LRY0 10.125f
//#define YLRDIM 84
//#define LRZ0 -0.875f
//#define ZLRDIM 172
//#define ORDER 2 //order of accuracy of interpolation
#define RE 20.f//2000.f//100.f;
#define UMAX 0.04f
#define SmagLES 0 //1,0
#define MODEL "MRT" //BGK,MRT,STREAM
#define REFINEMENT 1 //1,0
#define CS 0.02f
#define VELAV 1
#define START_VELAV 40000
#define START_VELFLUC 80000
inline __device__ int ImageFcnLR(float x, float y, float z)
{
int value = 0;
// if(((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1))<OBSTR1*OBSTR1)
// value = 10;
// else if(((x-OBSTX2)*(x-OBSTX2)+(y-OBSTY2)*(y-OBSTY2))<OBSTR2*OBSTR2)
// value = 10;
if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1)
{
value = 10;
// if(z < 1 || z > ZDIM-2)
// value = 1;
}
if(z < 0.5f)
value = 1;
if(z > ZDIM-1-0.5f)
value = 1;
return value;
}
inline __device__ int ImageFcn(int x, int y, int z)
{
int value = 0;
//Cylinder
// if(((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1))<OBSTR1*OBSTR1)
// value = 10;
// else if(((x-OBSTX2)*(x-OBSTX2)+(y-OBSTY2)*(y-OBSTY2))<OBSTR2*OBSTR2)
// value = 10;
// if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1)
// value = 10;
//Lid Driven Cavity
// if(y == 0 || y == YDIM-1 || z == 0 || z == ZDIM-1)
// value = 1;
// else if(x == XDIM-2 || y == 1 || y == YDIM-2 || z == 1 || z == ZDIM-2)
// return 1;
// else if(x == 0)
// return 1;
if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1)
value = 10;
// if(z == 1)
// value = 1;
// if(z == ZDIM-2)
// value = 1;
if(y == 0)
value = 1;//200;//22;
else if(y == YDIM-1)
value = 1;//100;
else if(x == 0)
value = 400;//26;
else if(x == XDIM-1)
//else if(x > 42)
value = 300;//25;
else if(z == 0)
value = 1;
else if(z == ZDIM-1)
value = 1;
return value;
}
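//From the assignments above, the return code appears to select a boundary
//condition type in the update kernels (not shown here): 0 = interior fluid,
//1 = solid/bounce-back wall, 10 = the square obstacle, 400 = west inlet,
//300 = east outlet. Treat these labels as inferred, not authoritative.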
inline __device__ float PoisProf (float x){
float radius = (YDIM-1-1)*0.5f;
float result = -1.5f*(((1.0f-(x-0.5f)/radius))*((1.0f-(x-0.5f)/radius))-1.0f);
return (result);
}
inline __device__ float PoisProf3D (float x, float y){
x = x-0.5f;
y = y-0.5f;
float H = 41.f;
return 2.25f*16.f*UMAX*x*y*(H-x)*(H-y)/((H)*(H)*(H)*(H));
// float radius = (YDIM-1-1)*0.5f;
// float result = -1.0f*(((1.0f-(x-0.5f)/radius))*((1.0f-(x-0.5f)/radius))-1.0f);
// return (result);
}
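//PoisProf3D above is a low-order product-of-parabolas approximation to the
//laminar square-duct profile: at the centerline x=y=H/2 the product term
//equals 1, so the peak velocity is 2.25f*UMAX.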
int
timeval_subtract (double *result, struct timeval *x, struct timeval *y)
{
struct timeval result0;
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
tv_usec is certainly positive. */
result0.tv_sec = x->tv_sec - y->tv_sec;
result0.tv_usec = x->tv_usec - y->tv_usec;
*result = ((double)result0.tv_usec)/1e6 + (double)result0.tv_sec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
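//The helpers below guard array indexing: dmax(a) floors at 0, dmin(a,b)
//clamps to b-1, and the _p variants wrap periodically (dmin_p wraps a>=b
//back to 0, dmax_p wraps a<0 back to b-1).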
__device__ int dmin(int a, int b)
{
if (a<b) return a;
else return b-1;
}
__device__ int dmax(int a)
{
if (a>-1) return a;
else return 0;
}
__device__ int dmax(int a,int b)
{
if (a>b) return a;
else return b;
}
__device__ int dmin_p(int a, int b)
{
if (a<b) return a;
else return 0;
}
__device__ int dmax_p(int a, int b)
{
if (a>-1) return a;
else return b-1;
}
inline __device__ float trilinear_interp (float v000, float v001, float v010, float v011,
float v100, float v101, float v110, float v111, float x, float y, float z){
return v000*(1.f-x)*(1.f-y)*(1.f-z)+
v001*( x)*(1.f-y)*(1.f-z)+
v010*(1.f-x)*( y)*(1.f-z)+
v011*( x)*( y)*(1.f-z)+
v100*(1.f-x)*(1.f-y)*( z)+
v101*( x)*(1.f-y)*( z)+
v110*(1.f-x)*( y)*( z)+
v111*( x)*( y)*( z);
}
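//Hedged usage sketch (illustrative only, not part of the solver): the kernel
//name and corner values below are hypothetical. The corners sample the linear
//field f(x,y,z)=x+y+z, for which trilinear interpolation is exact, so out[0]
//should equal xf+yf+zf for any fractional offsets in [0,1].
__global__ void trilinear_interp_demo(float* out, float xf, float yf, float zf)
{
//corner ordering matches trilinear_interp: v000,v001(x=1),v010(y=1),v011,
//v100(z=1),v101,v110,v111
out[0] = trilinear_interp(0.f,1.f,1.f,2.f, 1.f,2.f,2.f,3.f, xf,yf,zf);
}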
inline __device__ int f_mem(int f_num, int x, int y, int z, size_t pitch, int zInner)
{
int index = (x+y*pitch+z*YDIM*pitch)+f_num*pitch*YDIM*(zInner);
index = dmax(index);
index = dmin(index,19*pitch*YDIM*(zInner));
return index;
}
inline __device__ int f_memLR(int f_num, int x, int y, int z, size_t pitch, int zInner)
{
int index = (x+y*pitch+z*YLRDIM*pitch)+f_num*pitch*YLRDIM*(zInner);
index = dmax(index);
index = dmin(index,19*pitch*YLRDIM*(zInner));
return index;
}
inline __device__ int f_mem_interp(int m_num, int x, int y, int z, int pitch, int zInner)
{
int index = (x+y*pitch+z*(YLRDIM*LRFACTOR+1)*pitch)+m_num*pitch*(YLRDIM*LRFACTOR+1)*(zInner);
index = dmax(index);
index = dmin(index,9*pitch*(YLRDIM*LRFACTOR+1)*(zInner));
return index;
}
inline __device__ int buff_mem_interp(int m_num, int x, int y, int pitch, int zInner)
{
int index = (x+y*pitch+m_num*(YLRDIM*LRFACTOR+1)*pitch);
index = dmax(index);
index = dmin(index,9*pitch*(YLRDIM*LRFACTOR+1));
return index;
}
inline __device__ int buff_mem(int f_num, int x, int y, size_t pitch)
{
int index = (x+y*pitch)+f_num*pitch*YDIM;
index = dmax(index);
index = dmin(index,19*pitch*YDIM);
return index;
}
inline __device__ int buff_memLR(int f_num, int x, int y, size_t pitch)
{
int index = (x+y*pitch)+f_num*pitch*YLRDIM;
index = dmax(index);
index = dmin(index,19*pitch*YLRDIM);
return index;
}
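//Layout note for the f_mem*/buff_mem* helpers above: distributions are stored
//structure-of-arrays, x fastest, then y (stride = pitch in elements), then z,
//with the distribution number f_num outermost. The dmax/dmin pair clamps a
//malformed index into the allocation instead of letting it run off the end.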
inline __device__ void Moments(float* f, float* m)
{
m[0 ] = f[0]+f[1]+f[2]+f[3]+f[4]+f[5]+f[6]+f[7]+f[8]+f[9]+f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18];
m[1 ] = -30.f*f[0]+-11.f*f[1]+-11.f*f[2]+-11.f*f[3]+-11.f*f[4]+ 8.f*f[5]+ 8.f*f[6]+ 8.f*f[7]+ 8.f*f[8]+-11.f*f[9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18];
m[2 ] = 12.f*f[0]+ -4.f*f[1]+ -4.f*f[2]+ -4.f*f[3]+ -4.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ -4.f*f[9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18];
m[3 ] = f[1]-f[3]+f[5]-f[6]-f[7]+f[8]+f[10]-f[12]+f[15]-f[17];
m[4 ] = -4.f*f[1] + 4.f*f[3] + f[5]+ - f[6]+ - f[7]+ f[8] + f[10] + - f[12] + f[15] + - f[17] ;
m[5 ] = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
m[6 ] = -4.f*f[2] + 4.f*f[4]+ f[5]+ f[6]+ - f[7]+ - f[8] + f[11] + - f[13] + f[16] + - f[18];
m[7 ] = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
m[8 ] = + -4.f*f[9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18];
m[9 ] = 2.f*f[1]+ - f[2]+ 2.f*f[3]+ - f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ - f[9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[10] = -4.f*f[1]+ 2.f*f[2]+ -4.f*f[3]+ 2.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ 2.f*f[9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[11] = f[2] + f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ - f[9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ;
m[12] = -2.f*f[2] -2.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ 2.f*f[9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ;
m[13] = f[5]+ - f[6]+ f[7]+ - f[8] ;
m[14] = f[11] + - f[13] + - f[16] + f[18];
m[15] = f[10] + - f[12] + - f[15] + f[17] ;
m[16] = f[5]+ - f[6]+ - f[7]+ f[8] - f[10] + f[12] + - f[15] + f[17] ;
m[17] = - f[5]+ - f[6]+ f[7]+ f[8] + f[11] + - f[13] + f[16] + - f[18];
m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18];
}
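//The rows above form the usual D3Q19 MRT transform (d'Humieres ordering):
//m[0] is density, m[3]/m[5]/m[7] the x/y/z momenta, m[1]/m[2] energy-type
//moments, m[9]-m[12] diagonal stress moments, m[13]-m[15] off-diagonal
//stresses, and m[16]-m[18] third-order moments.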
void Moments_host(float* f, float* m)
{
m[0 ] = f[0]+f[1]+f[2]+f[3]+f[4]+f[5]+f[6]+f[7]+f[8]+f[9]+f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18];
m[1 ] = -30.f*f[0]+-11.f*f[1]+-11.f*f[2]+-11.f*f[3]+-11.f*f[4]+ 8.f*f[5]+ 8.f*f[6]+ 8.f*f[7]+ 8.f*f[8]+-11.f*f[9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18];
m[2 ] = 12.f*f[0]+ -4.f*f[1]+ -4.f*f[2]+ -4.f*f[3]+ -4.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ -4.f*f[9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18];
m[3 ] = f[1]-f[3]+f[5]-f[6]-f[7]+f[8]+f[10]-f[12]+f[15]-f[17];
m[4 ] = -4.f*f[1] + 4.f*f[3] + f[5]+ - f[6]+ - f[7]+ f[8] + f[10] + - f[12] + f[15] + - f[17] ;
m[5 ] = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
m[6 ] = -4.f*f[2] + 4.f*f[4]+ f[5]+ f[6]+ - f[7]+ - f[8] + f[11] + - f[13] + f[16] + - f[18];
m[7 ] = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
m[8 ] = + -4.f*f[9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18];
m[9 ] = 2.f*f[1]+ - f[2]+ 2.f*f[3]+ - f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ - f[9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[10] = -4.f*f[1]+ 2.f*f[2]+ -4.f*f[3]+ 2.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ 2.f*f[9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[11] = f[2] + f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ - f[9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ;
m[12] = -2.f*f[2] -2.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ 2.f*f[9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ;
m[13] = f[5]+ - f[6]+ f[7]+ - f[8] ;
m[14] = f[11] + - f[13] + - f[16] + f[18];
m[15] = f[10] + - f[12] + - f[15] + f[17] ;
m[16] = f[5]+ - f[6]+ - f[7]+ f[8] - f[10] + f[12] + - f[15] + f[17] ;
m[17] = - f[5]+ - f[6]+ f[7]+ f[8] + f[11] + - f[13] + f[16] + - f[18];
m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18];
}
void InvertMoments_host(float* f, float* m)
{
float u = m[3];
float v = m[5];
float w = m[7];
f[0 ]=(0.052631579f*m[0] +- 0.012531328f*(m[1])+ 0.047619048f*(m[2]));
f[1 ]=(0.052631579f*m[0]+ 0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ -0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[2 ]=(0.052631579f*m[0] + 0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[3 ]=(0.052631579f*m[0]+ -0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ 0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[4 ]=(0.052631579f*m[0] + -0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[5 ]=(0.052631579f*m[0]+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[6 ]=(0.052631579f*m[0]+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[7 ]=(0.052631579f*m[0]+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[8 ]=(0.052631579f*m[0]+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[9 ]=(0.052631579f*m[0] + 0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*m[0]+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*m[0] + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]+m[8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*m[0]+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*m[0] + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]-m[8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*m[0] + -0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*m[0]+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*m[0] + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]-m[8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*m[0]+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*m[0] + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]+m[8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
}
inline __device__ void mrt_meq(float* meq, float rho, float u, float v, float w)
{
meq[ 0] = rho;
meq[ 1] = -11.f*rho+19.f*(u*u+v*v+w*w);
meq[ 2] = 7.53968254f*(u*u+v*v+w*w);
meq[ 3] = u;
meq[ 4] = -0.666666667f*u;
meq[ 5] = v;
meq[ 6] = -0.666666667f*v;
meq[ 7] = w;
meq[ 8] = -0.666666667f*w;
meq[ 9] = 2.f*u*u-(v*v+w*w);
meq[11] = v*v-w*w;
meq[13] = u*v;
meq[14] = v*w;
meq[15] = u*w;
}
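//Note: mrt_meq only writes the moments whose equilibria are nonzero in this
//variant; entries such as meq[10], meq[12] and meq[16]-meq[18] are left
//untouched, so callers are expected to zero-initialize meq first.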
//outputs physical moments (rho,u,v,w,Pxx,Pww,Pxy,Pyz,Pxz) from f
inline __device__ void PhysicalMoments(float* mom, float* f)
{
mom[0] = f[0]+f[1]+f[2]+f[3]+f[4]+f[5]+f[6]+f[7]+f[8]+f[9]+f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18];
mom[1] = f[1]-f[3]+f[5]-f[6]-f[7]+f[8]+f[10]-f[12]+f[15]-f[17];
mom[2] = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
mom[3] = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
mom[4] = 2.f*f[1]+-f[2]+2.f*f[3]+-f[4]+f[5]+f[6]+f[7]+f[8]+-f[9]+f[10]+-2.f*f[11]+f[12]+-2.f*f[13]+-f[14]+f[15]+-2.f*f[16]+f[17]+-2.f*f[18];
mom[5] = f[2]+f[4]+f[5]+f[6]+f[7]+f[8]+-f[9]+-f[10]+-f[12]+-f[14]+-f[15]+-f[17];
mom[6] = f[5]+-f[6]+f[7]+-f[8];
mom[7] = f[11]+-f[13]+-f[16]+f[18];
mom[8] = f[10]+-f[12]+-f[15]+f[17];
}
inline __device__ void InvertMoments(float* f, float* m)
{
float u = m[3];
float v = m[5];
float w = m[7];
f[0 ]=(0.052631579f*m[0] +- 0.012531328f*(m[1])+ 0.047619048f*(m[2]));
f[1 ]=(0.052631579f*m[0]+ 0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ -0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[2 ]=(0.052631579f*m[0] + 0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[3 ]=(0.052631579f*m[0]+ -0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ 0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[4 ]=(0.052631579f*m[0] + -0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[5 ]=(0.052631579f*m[0]+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[6 ]=(0.052631579f*m[0]+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[7 ]=(0.052631579f*m[0]+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[8 ]=(0.052631579f*m[0]+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[9 ]=(0.052631579f*m[0] + 0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*m[0]+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*m[0] + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]+m[8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*m[0]+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*m[0] + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]-m[8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*m[0] + -0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*m[0]+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*m[0] + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]-m[8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*m[0]+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*m[0] + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]+m[8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
}
inline __device__ void InvertPhysicalMoments(float* f, float* mom, float SF)
{
float m[19]={0};
m[ 0] = mom[0];
m[ 1] = (-11.f*mom[0]+19.f*(mom[1]*mom[1]+mom[2]*mom[2]+mom[3]*mom[3]));
m[ 2] = 7.53968254f*(mom[1]*mom[1]+mom[2]*mom[2]+mom[3]*mom[3]);
m[ 3] = mom[1];
m[ 4] = -0.666666667f*mom[1];
m[ 5] = mom[2];
m[ 6] = -0.666666667f*mom[2];
m[ 7] = mom[3];
m[ 8] = -0.666666667f*mom[3];
m[ 9] = mom[4]*SF+(1.f-SF)*(2.f*mom[1]*mom[1]-(mom[2]*mom[2]+mom[3]*mom[3]));
m[11] = mom[5]*SF+(1.f-SF)*(mom[2]*mom[2]-mom[3]*mom[3]);
m[13] = mom[6]*SF+(1.f-SF)*mom[1]*mom[2];
m[14] = mom[7]*SF+(1.f-SF)*mom[2]*mom[3];
m[15] = mom[8]*SF+(1.f-SF)*mom[1]*mom[3];
// InvertMoments(f,m);
float u = m[3];
float v = m[5];
float w = m[7];
f[0 ]=(0.052631579f*m[0] +- 0.012531328f*(m[1])+ 0.047619048f*(m[2]));
f[1 ]=(0.052631579f*m[0]+ 0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ -0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[2 ]=(0.052631579f*m[0] + 0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[3 ]=(0.052631579f*m[0]+ -0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ 0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[4 ]=(0.052631579f*m[0] + -0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[5 ]=(0.052631579f*m[0]+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[6 ]=(0.052631579f*m[0]+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[7 ]=(0.052631579f*m[0]+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[8 ]=(0.052631579f*m[0]+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[9 ]=(0.052631579f*m[0] + 0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*m[0]+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*m[0] + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]+m[8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*m[0]+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*m[0] + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]-m[8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*m[0] + -0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*m[0]+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*m[0] + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]-m[8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*m[0]+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*m[0] + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]+m[8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
}
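//InvertPhysicalMoments blends each second-order moment between its local
//value (weight SF) and its equilibrium value (weight 1-SF) before the inverse
//transform. This rescales the non-equilibrium stress when distributions cross
//between meshes of different resolution, matching the SF_cf/SF_fc factors
//used by the interpolation kernels in the time loop.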
//outputs strain rate tensor (Sxx,Syy,Szz,Sxy,Syz,Sxz) from inputs (m0,m3,m5,m7,m9,m11,m13,m14,m15)
inline __device__ void StrainRate(float* S, float* m_strain, float omega)
{
float m1 = (-11.f*m_strain[0]+19.f*(m_strain[1]*m_strain[1]+m_strain[2]*m_strain[2]+m_strain[3]*m_strain[3]));
float m9 = m_strain[4];
float m11= m_strain[5];
float m13= m_strain[6];
float m14= m_strain[7];
float m15= m_strain[8];
S[0] = -0.026315789f*( m1+19.f*omega* m9);
S[1] = -0.013157895f*(2.f*m1-19.f*omega*(m9-3.f*m11));
S[2] = -0.013157895f*(2.f*m1-19.f*omega*(m9+3.f*m11));
S[3] = -1.5f*omega*m13;
S[4] = -1.5f*omega*m14;
S[5] = -1.5f*omega*m15;
}
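//StrainRate recovers S from the non-equilibrium second-order moments: m1 is
//replaced by its equilibrium computed from rho,u,v,w, so the bracketed terms
//isolate the non-equilibrium parts, scaled by omega and the D3Q19 constants
//(0.026315789f = 1/38, 0.013157895f = 1/76).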
inline __device__ void mrt_collide(float* f, float omega)
{
float m[19];
//float u,v,w;
m[3] = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
m[5] = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
m[7] = f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
m[0] = f[ 0]+f[ 1]+f[ 2]+f[ 3]+f[ 4]+f[ 5]+f[ 6]+f[ 7]+f[ 8]+f[ 9]+
f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18];
m[ 1] = 19.f*(-f[ 0]+ f[ 5]+f[ 6]+f[ 7]+f[ 8]+f[10]+f[11]+f[12]+f[13]+f[15]+f[16]+f[17]+f[18] -(m[3]*m[3]+m[5]*m[5]+m[7]*m[7]));//+8.f*(f[ 5]+f[ 6]+f[ 7]+f[ 8]+f[10]+f[11]+f[12]+f[13]+f[15]+f[16]+f[17]+f[18]);
m[ 2] = 12.f*f[ 0]+ -4.f*f[ 1]+ -4.f*f[ 2]+ -4.f*f[ 3]+ -4.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18] -7.53968254f*(m[3]*m[3]+m[5]*m[5]+m[7]*m[7]);
m[ 4] = 1.666666667f*(-3.f*f[1]+3.f*f[ 3]+m[3]);
m[ 6] = 1.666666667f*(-3.f*f[2]+3.f*f[ 4]+m[5]);
m[ 8] = 1.666666667f*(-3.f*f[9]+3.f*f[14]+m[7]);
m[ 9] = 2.f*f[ 1]+ - f[ 2]+ 2.f*f[ 3]+ - f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+- f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+-2.f*f[13]+- f[14]+ f[15]+ -2.f*f[16]+ f[17]+-2.f*f[18] -(2.f*m[3]*m[3]-(m[5]*m[5]+m[7]*m[7]));
m[10] =-4.f*f[ 1]+ 2.f*f[ 2]+ -4.f*f[ 3]+ 2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+-2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+-2.f*f[18];
m[11] = f[ 2] + f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+- f[ 9]+-f[10] +-f[12] +- f[14]+-f[15] +-f[17] -(m[5]*m[5]-m[7]*m[7]);
m[12] = -2.f*f[ 2] -2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+-f[10] +-f[12] + 2.f*f[14]+-f[15] +-f[17] ;
m[13] = f[ 5]+-f[ 6]+ f[ 7]+-f[ 8] -m[3]*m[5];
m[14] = f[11] +- f[13] + - f[16] + f[18] -m[5]*m[7];
m[15] = f[10] + - f[12] +-f[15] + f[17] -m[3]*m[7];
m[16] = f[ 5]+-f[ 6]+-f[ 7]+ f[ 8] -f[10] + f[12] +-f[15] + f[17] ;
m[17] = -f[ 5]+-f[ 6]+ f[ 7]+ f[ 8] + f[11] +- f[13] + f[16] +- f[18];
m[18] = f[10]+- f[11]+ f[12]+- f[13] +-f[15]+ f[16]+-f[17]+ f[18];
if(SmagLES == 1)
{
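//Smagorinsky LES closure: Q gauges the magnitude of the non-equilibrium
//momentum flux, and tau is taken as the positive root of the quadratic that
//augments the molecular relaxation time with an eddy viscosity proportional
//to CS*Q; collision then proceeds with the locally adjusted omega.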
float Pxx = 0.33333333f*(m[1]+2.f*m[0])+m[9];
float Pyy = Pxx+0.5f*(m[11]-m[9]);//0.3333333f*(m[1]+2.f*m[0]+0.5f*(3.f*m[11]-m[9]));
float Pzz = Pyy-m[11];
float Q11 = 0.33333333f*m[0]+m[3]*m[3]-Pxx;
float Q22 = 0.33333333f*m[0]+m[5]*m[5]-Pyy;
float Q33 = 0.33333333f*m[0]+m[7]*m[7]-Pzz;
float Q12 = 0.33333333f*m[0]+m[3]*m[5]-m[13];
float Q23 = 0.33333333f*m[0]+m[5]*m[7]-m[14];
float Q13 = 0.33333333f*m[0]+m[3]*m[7]-m[15];
float Q = sqrt(Q11*Q11+Q22*Q22+Q33*Q33+2.f*Q12*Q12+2.f*Q23*Q23+2.f*Q13*Q13);
float tau0 = 1.f/omega;
float tau = 0.5f*tau0+0.5f*sqrt(tau0*tau0+18.f*CS*sqrt(2.f)*Q);
omega = 1.f/tau;
}
f[ 0] -=- 0.012531328f*(m[ 1])+ 0.047619048f*(m[ 2]);
f[ 1] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ -0.1f*(m[ 4]) + 0.055555556f*((m[ 9])*omega-m[10]);
f[ 2] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 6]) +-0.027777778f*((m[ 9])*omega-m[10])+ 0.083333333f*((m[11])*omega-m[12]);
f[ 3] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ 0.1f*(m[ 4]) + 0.055555556f*((m[ 9])*omega-m[10]);
f[ 4] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 6]) +-0.027777778f*((m[ 9])*omega-m[10])+ 0.083333333f*((m[11])*omega-m[12]);
f[ 5] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ omega*(0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13])));
f[ 6] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ omega*(0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13])));
f[ 7] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ omega*(0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13])));
f[ 8] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ omega*(0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13])));
f[ 9] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 8])+-0.027777778f*((m[ 9])*omega-m[10])+-0.083333333f*((m[11])*omega-m[12]);
f[10] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ omega*(0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15])));
f[11] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]+m[ 8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+omega*(-0.055555556f*(m[ 9]) +( 0.25f*(m[14])));
f[12] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ omega*(0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15])));
f[13] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]-m[ 8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+omega*(-0.055555556f*(m[ 9]) +(-0.25f*(m[14])));
f[14] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 8])+-0.027777778f*((m[ 9])*omega-m[10])+-0.083333333f*((m[11])*omega-m[12]);
f[15] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ omega*(0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15])));
f[16] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]-m[ 8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+omega*(-0.055555556f*(m[ 9]) +(-0.25f*(m[14])));
f[17] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ omega*(0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15])));
f[18] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]+m[ 8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+omega*(-0.055555556f*(m[ 9]) +( 0.25f*(m[14])));
}
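//The *_Extrap routines below implement moment-based open boundaries: each
//keeps the higher-order moments of the incoming f but rebuilds it with a
//prescribed density (North/East) or a prescribed normal velocity with zero
//tangential components (South/West).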
inline __device__ void North_Extrap(float* f, float rho)
{
float m[19];
//rho = 1.0f;
float u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
float v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
float w = f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
m[ 1] = -30.f*f[ 0]+-11.f*f[ 1]+-11.f*f[ 2]+-11.f*f[ 3]+-11.f*f[ 4]+ 8.f*f[ 5]+ 8.f*f[ 6]+ 8.f*f[ 7]+ 8.f*f[ 8]+-11.f*f[ 9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18];
m[ 2] = 12.f*f[ 0]+ -4.f*f[ 1]+ -4.f*f[ 2]+ -4.f*f[ 3]+ -4.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18];
m[ 4] = -4.f*f[ 1] + 4.f*f[ 3] + f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] + f[10] + - f[12] + f[15] + - f[17] ;
m[ 6] = -4.f*f[ 2] + 4.f*f[ 4]+ f[ 5]+ f[ 6]+ - f[ 7]+ - f[ 8] + f[11] + - f[13] + f[16] + - f[18];
m[ 8] = + -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18];
m[ 9] = 2.f*f[ 1]+ - f[ 2]+ 2.f*f[ 3]+ - f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[10] = -4.f*f[ 1]+ 2.f*f[ 2]+ -4.f*f[ 3]+ 2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[11] = f[ 2] + f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ;
m[12] = -2.f*f[ 2] -2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ;
m[13] = f[ 5]+ - f[ 6]+ f[ 7]+ - f[ 8] ;
m[14] = f[11] + - f[13] + - f[16] + f[18];
m[15] = f[10] + - f[12] + - f[15] + f[17] ;
m[16] = f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] - f[10] + f[12] + - f[15] + f[17] ;
m[17] = - f[ 5]+ - f[ 6]+ f[ 7]+ f[ 8] + f[11] + - f[13] + f[16] + - f[18];
m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18];
f[ 0] =(0.052631579f*rho +- 0.012531328f*(m[ 1])+ 0.047619048f*(m[ 2]));
f[ 1] =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ -0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10]));
f[ 2] =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[ 3] =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ 0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10]));
f[ 4] =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[ 5] =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[ 6] =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[ 7] =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[ 8] =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[ 9] =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]+m[ 8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]-m[ 8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]-m[ 8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]+m[ 8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))));
}
inline __device__ void South_Extrap(float* f, float v)
{
float m[19];
float u = 0.f;//f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
float w = 0.f;//f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
float rho = f[0]+f[1]+f[2]+f[3]+f[4]+f[5]+f[6]+f[7]+f[8]+f[9]+f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18];
m[ 1] = -30.f*f[ 0]+-11.f*f[ 1]+-11.f*f[ 2]+-11.f*f[ 3]+-11.f*f[ 4]+ 8.f*f[ 5]+ 8.f*f[ 6]+ 8.f*f[ 7]+ 8.f*f[ 8]+-11.f*f[ 9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18];
m[ 2] = 12.f*f[ 0]+ -4.f*f[ 1]+ -4.f*f[ 2]+ -4.f*f[ 3]+ -4.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18];
m[ 4] = -4.f*f[ 1] + 4.f*f[ 3] + f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] + f[10] + - f[12] + f[15] + - f[17] ;
m[ 6] = -4.f*f[ 2] + 4.f*f[ 4]+ f[ 5]+ f[ 6]+ - f[ 7]+ - f[ 8] + f[11] + - f[13] + f[16] + - f[18];
m[ 8] = + -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18];
m[ 9] = 2.f*f[ 1]+ - f[ 2]+ 2.f*f[ 3]+ - f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[10] = -4.f*f[ 1]+ 2.f*f[ 2]+ -4.f*f[ 3]+ 2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[11] = f[ 2] + f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ;
m[12] = -2.f*f[ 2] -2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ;
m[13] = f[ 5]+ - f[ 6]+ f[ 7]+ - f[ 8] ;
m[14] = f[11] + - f[13] + - f[16] + f[18];
m[15] = f[10] + - f[12] + - f[15] + f[17] ;
m[16] = f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] - f[10] + f[12] + - f[15] + f[17] ;
m[17] = - f[ 5]+ - f[ 6]+ f[ 7]+ f[ 8] + f[11] + - f[13] + f[16] + - f[18];
m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18];
f[ 0] =(0.052631579f*rho +- 0.012531328f*(m[ 1])+ 0.047619048f*(m[ 2]));
f[ 1] =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ -0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10]));
f[ 2] =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[ 3] =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ 0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10]));
f[ 4] =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[ 5] =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[ 6] =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[ 7] =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[ 8] =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[ 9] =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]+m[ 8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]-m[ 8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]-m[ 8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]+m[ 8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))));
}
inline __device__ void East_Extrap(float* f, float rho)
{
float m[19];
//rho = 0.0f;
float u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
float v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
float w = f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
m[ 1] = -30.f*f[ 0]+-11.f*f[ 1]+-11.f*f[ 2]+-11.f*f[ 3]+-11.f*f[ 4]+ 8.f*f[ 5]+ 8.f*f[ 6]+ 8.f*f[ 7]+ 8.f*f[ 8]+-11.f*f[ 9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18];
m[ 2] = 12.f*f[ 0]+ -4.f*f[ 1]+ -4.f*f[ 2]+ -4.f*f[ 3]+ -4.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18];
m[ 4] = -4.f*f[ 1] + 4.f*f[ 3] + f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] + f[10] + - f[12] + f[15] + - f[17] ;
m[ 6] = -4.f*f[ 2] + 4.f*f[ 4]+ f[ 5]+ f[ 6]+ - f[ 7]+ - f[ 8] + f[11] + - f[13] + f[16] + - f[18];
m[ 8] = + -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18];
m[ 9] = 2.f*f[ 1]+ - f[ 2]+ 2.f*f[ 3]+ - f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[10] = -4.f*f[ 1]+ 2.f*f[ 2]+ -4.f*f[ 3]+ 2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[11] = f[ 2] + f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ;
m[12] = -2.f*f[ 2] -2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ;
m[13] = f[ 5]+ - f[ 6]+ f[ 7]+ - f[ 8] ;
m[14] = f[11] + - f[13] + - f[16] + f[18];
m[15] = f[10] + - f[12] + - f[15] + f[17] ;
m[16] = f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] - f[10] + f[12] + - f[15] + f[17] ;
m[17] = - f[ 5]+ - f[ 6]+ f[ 7]+ f[ 8] + f[11] + - f[13] + f[16] + - f[18];
m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18];
f[ 0] =(0.052631579f*rho +- 0.012531328f*(m[ 1])+ 0.047619048f*(m[ 2]));
f[ 1] =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ -0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10]));
f[ 2] =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[ 3] =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ 0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10]));
f[ 4] =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[ 5] =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[ 6] =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[ 7] =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[ 8] =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[ 9] =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]+m[ 8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]-m[ 8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]-m[ 8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]+m[ 8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))));
}
inline __device__ void West_Extrap(float* f, float u)
{
float m[19];
float v = 0.f;//f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
float w = 0.f;//f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
float rho = f[0]+f[1]+f[2]+f[3]+f[4]+f[5]+f[6]+f[7]+f[8]+f[9]+f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18];
m[ 1] = -30.f*f[ 0]+-11.f*f[ 1]+-11.f*f[ 2]+-11.f*f[ 3]+-11.f*f[ 4]+ 8.f*f[ 5]+ 8.f*f[ 6]+ 8.f*f[ 7]+ 8.f*f[ 8]+-11.f*f[ 9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18];
m[ 2] = 12.f*f[ 0]+ -4.f*f[ 1]+ -4.f*f[ 2]+ -4.f*f[ 3]+ -4.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18];
m[ 4] = -4.f*f[ 1] + 4.f*f[ 3] + f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] + f[10] + - f[12] + f[15] + - f[17] ;
m[ 6] = -4.f*f[ 2] + 4.f*f[ 4]+ f[ 5]+ f[ 6]+ - f[ 7]+ - f[ 8] + f[11] + - f[13] + f[16] + - f[18];
m[ 8] = + -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18];
m[ 9] = 2.f*f[ 1]+ - f[ 2]+ 2.f*f[ 3]+ - f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[10] = -4.f*f[ 1]+ 2.f*f[ 2]+ -4.f*f[ 3]+ 2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[11] = f[ 2] + f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ;
m[12] = -2.f*f[ 2] -2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ;
m[13] = f[ 5]+ - f[ 6]+ f[ 7]+ - f[ 8] ;
m[14] = f[11] + - f[13] + - f[16] + f[18];
m[15] = f[10] + - f[12] + - f[15] + f[17] ;
m[16] = f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] - f[10] + f[12] + - f[15] + f[17] ;
m[17] = - f[ 5]+ - f[ 6]+ f[ 7]+ f[ 8] + f[11] + - f[13] + f[16] + - f[18];
m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18];
f[ 0] =(0.052631579f*rho +- 0.012531328f*(m[ 1])+ 0.047619048f*(m[ 2]));
f[ 1] =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ -0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10]));
f[ 2] =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[ 3] =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ 0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10]));
f[ 4] =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[ 5] =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[ 6] =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[ 7] =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[ 8] =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[ 9] =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]+m[ 8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]-m[ 8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]-m[ 8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]+m[ 8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))));
}
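/*
xsymmetry_bot/xsymmetry_top apply a free-slip (mirror) condition on the x-min
and x-max faces: populations with an x-component are replaced by their mirror
images, with the y/z edges and corners handled case by case so the diagonal
populations stay consistent.
*/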
__device__ void xsymmetry_bot(float* f, int y, int z)
{
if(y == 0 && z == 0){
f[ 2] = f[ 4];
f[13]=f[18];
f[11]=f[18];
f[16]=f[18];
f[ 6] =f[ 7];
f[ 9] =f[14];
f[12]=f[17];
}
else if(y == 0 && z == ZDIM-1){
		f[ 2] = f[ 4];
f[11]=f[13];
f[18]=f[13];
f[16]=f[13];
f[ 6] =f[ 7];
f[14]=f[ 9];
f[17]=f[12];
}
else if(y == YDIM-1 && z == 0){
f[ 4] = f[ 2];
f[11]=f[16];
f[18]=f[16];
f[13]=f[16];
f[ 7] =f[ 6];
f[ 9] =f[14];
f[12]=f[17];
}
else if(y == YDIM-1 && z == ZDIM-1){
f[ 4] = f[ 2];
f[16]=f[11];
f[18]=f[11];
f[13]=f[11];
f[ 7] =f[ 6];
f[14]=f[ 9];
f[17]=f[12];
}
else{
if(y == 0){
f[ 2] = f[ 4];
f[11]=f[13];
f[16]=f[18];
f[ 8] = f[ 5];
}
else if(y == YDIM-1){
f[ 4]=f[ 2] ;
f[13]=f[11];
f[18]=f[16];
f[ 5]=f[ 8] ;
}
}
f[ 1] = f[ 3] ;
f[ 5] = f[ 6] ;
f[ 8] = f[ 7] ;
f[10]= f[12];
f[15]= f[17];
}
__device__ void xsymmetry_top(float* f, int y, int z)
{
if(y == 0 && z == 0){
f[ 2] = f[ 4];
f[13] = f[18];
f[11] = f[18];
f[16] = f[18];
f[ 5] = f[ 8];
f[ 9] = f[14];
f[10] = f[15];
}
else if(y == 0 && z == ZDIM-1){
f[ 2] = f[ 4];
f[11] = f[13];
f[18] = f[13];
f[16] = f[13];
f[ 5] = f[ 8];
f[14] = f[ 9];
f[15] = f[10];
}
else if(y == YDIM-1 && z == 0){
f[ 4] = f[ 2];
f[18] = f[16];
f[11] = f[16];
f[13] = f[16];
f[ 8] = f[ 5];
f[ 9] = f[14];
f[10] = f[15];
}
else if(y == YDIM-1 && z == ZDIM-1){
f[ 4] = f[ 2];
f[13] = f[11];
f[16] = f[11];
f[18] = f[11];
f[ 8] = f[ 5];
f[14] = f[ 9];
f[15] = f[10];
}
else{
if(y == 0){
f[ 2] = f[ 4];
f[11] = f[13];
f[16] = f[18];
f[ 5] = f[ 8];
}
else if(y == YDIM-1){
f[ 4] = f[ 2];
f[13] = f[11];
f[18] = f[16];
f[ 8] = f[ 5];
}
}
f[ 3] = f[ 1] ;
f[ 6] = f[ 5] ;
f[ 7] = f[ 8] ;
f[12]= f[10];
f[17]= f[15];
}
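/*
vel_av/vel_fluc accumulate running time statistics of u and v without storing
any history: with n = t-START_VELAV prior samples, the update is the
incremental mean uAv_{n+1} = (n*uAv_n + u)/(n+1). A minimal host-side sketch
of the same recurrence (illustration only, not part of the solver;
running_mean is a hypothetical helper):

inline float running_mean(float mean, float sample, int n)
{
    //incremental mean over n prior samples
    return (mean*n+sample)/(n+1);
}
*/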
inline __device__ void vel_av(float* f, float& uAv, float& vAv, int t)
{
float u,v;//,w;
u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
uAv = (uAv*(t-START_VELAV)+u)/((t-START_VELAV)+1);
vAv = (vAv*(t-START_VELAV)+v)/((t-START_VELAV)+1);
}
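//LR variant: a fine-grid step advances time by only LRFACTOR, so each sample
//is weighted by LRFACTOR in the running mean (hence the float t).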
inline __device__ void vel_avLR(float* f, float& uAv, float& vAv, float t)
{
float u,v;//,w;
u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
uAv = (uAv*(t-START_VELAV)+u*LRFACTOR)/((t-START_VELAV)+LRFACTOR);
vAv = (vAv*(t-START_VELAV)+v*LRFACTOR)/((t-START_VELAV)+LRFACTOR);
}
inline __device__ void vel_fluc(float* f, float& uAv,
float& vAv, float& ufluc, float& vfluc, int t)
{
float u,v;//,w;
u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
u = (u-uAv)*(u-uAv);
v = (v-vAv)*(v-vAv);
ufluc = (ufluc*(t-START_VELFLUC)+u)/((t-START_VELFLUC)+1);
vfluc = (vfluc*(t-START_VELFLUC)+v)/((t-START_VELFLUC)+1);
}
inline __device__ void vel_flucLR(float* f, float& uAv,
float& vAv, float& ufluc, float& vfluc, float t)
{
float u,v;//,w;
u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
u = (u-uAv)*(u-uAv);
v = (v-vAv)*(v-vAv);
ufluc = (ufluc*(t-START_VELFLUC)+u*LRFACTOR)/((t-START_VELFLUC)+LRFACTOR);
vfluc = (vfluc*(t-START_VELFLUC)+v*LRFACTOR)/((t-START_VELFLUC)+LRFACTOR);
}
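/*
initialize sets every node of the bulk array to the equilibrium distribution
at rest (rho = 1, u = v = w = 0): the equilibrium moments are built with
mrt_meq and converted back to f's with InvertMoments.
*/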
__global__ void initialize(float *fout, size_t pitch, int zInner, int GPU_N)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
float xcoord = x;
float ycoord = y;
float zcoord = z+1+GPU_N*ZDIM;
int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
float f[19] = {0};
float m[19] = {0};
int im = ImageFcn(xcoord,ycoord,zcoord);
float u,v,w,rho;
rho = 1.f;
u = 0.f;
v = 0.f;
w = 0.f;
if(im == 10 || im == 1){
u = 0.0f;
v = 0.0f;
w = 0.0f;
}
mrt_meq(m,rho,u,v,w);
InvertMoments(f,m);
for(int i = 0; i<19; i++)
fout[j+i *pitch*YDIM*zInner]=f[ i];
}
__global__ void initializeLR(float *fout, size_t pitch, int zInner, int GPU_N)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
	float xcoord = LRX0+x*LRFACTOR;
	float ycoord = LRY0+y*LRFACTOR;
	float zcoord = LRZ0+z*LRFACTOR;//note: local to this device's LR block; the GPU offset is not applied here
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
float f[19] = {0};
float m[19] = {0};
int im = ImageFcnLR(xcoord,ycoord,zcoord);
float u,v,w,rho;
rho = 1.f;
u = 0.0f;
v = 0.0f;
w = 0.0f;
if(im == 10 || im == 1){
u = 0.0f;
v = 0.0f;
w = 0.0f;
}
mrt_meq(m,rho,u,v,w);
InvertMoments(f,m);
for(int i = 0; i<19; i++)
fout[j+i *pitch*YLRDIM*zInner]=f[ i];
}
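/*
Multi-GPU halo layout: each device stores zInner interior z-slices in fA plus
two single-slice buffers, g (bottom) and h (top). update_top advances the top
buffer slice: in-plane populations stream within hA, the upward-moving
populations f[9..13] arrive from the last interior slice of fA, and the
downward-moving populations f[14..18] arrive from temp, the neighboring
device's buffer. The result is written to hB (A/B ping-pong).
*/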
__global__ void update_top(float* hB, float* hA, float* fA, float* temp,
float omega, size_t pitch, int GPU, int zInner, float* FX, float* FY, float* FZ, int t, int flag_F, float* h_interp, size_t pitch_interp)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int j = x+y*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,(GPU+1)*(zInner+2)-1);
float f[19];
__shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
__shared__ int check[1];
check[0] = 0;
	__syncthreads();
f[0 ]= hA [j];
f[1 ]= hA [buff_mem(1 ,x-1,y ,pitch)];
f[3 ]= hA [buff_mem(3 ,x+1,y ,pitch)];
f[2 ]= hA [buff_mem(2 ,x ,y-1,pitch)];
f[5 ]= hA [buff_mem(5 ,x-1,y-1,pitch)];
f[6 ]= hA [buff_mem(6 ,x+1,y-1,pitch)];
f[4 ]= hA [buff_mem(4 ,x ,y+1,pitch)];
f[7 ]= hA [buff_mem(7 ,x+1,y+1,pitch)];
f[8 ]= hA [buff_mem(8 ,x-1,y+1,pitch)];
f[9 ]= fA [f_mem (9 ,x ,y ,zInner-1,pitch, zInner)];
f[10]= fA [f_mem (10,x-1,y ,zInner-1,pitch, zInner)];
f[11]= fA [f_mem (11,x ,y-1,zInner-1,pitch, zInner)];
f[12]= fA [f_mem (12,x+1,y ,zInner-1,pitch, zInner)];
f[13]= fA [f_mem (13,x ,y+1,zInner-1,pitch, zInner)];
f[14]= temp[buff_mem(14,x ,y ,pitch)];
f[15]= temp[buff_mem(15,x-1,y ,pitch)];
f[16]= temp[buff_mem(16,x ,y-1,pitch)];
f[17]= temp[buff_mem(17,x+1,y ,pitch)];
f[18]= temp[buff_mem(18,x ,y+1,pitch)];
if(im == 1 || im ==10){//BB
if(im == 10 && flag_F == 1){
check[0] = 1;
sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6];
sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17];
sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6];
sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18];
sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13];
sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
hB[buff_mem(0 ,x,y,pitch)] = f[0 ];
hB[buff_mem(1 ,x,y,pitch)] = f[3 ];
hB[buff_mem(2 ,x,y,pitch)] = f[4 ];
hB[buff_mem(3 ,x,y,pitch)] = f[1 ];
hB[buff_mem(4 ,x,y,pitch)] = f[2 ];
hB[buff_mem(5 ,x,y,pitch)] = f[7 ];
hB[buff_mem(6 ,x,y,pitch)] = f[8 ];
hB[buff_mem(7 ,x,y,pitch)] = f[5 ];
hB[buff_mem(8 ,x,y,pitch)] = f[6 ];
hB[buff_mem(9 ,x,y,pitch)] = f[14];
hB[buff_mem(10,x,y,pitch)] = f[17];
hB[buff_mem(11,x,y,pitch)] = f[18];
hB[buff_mem(12,x,y,pitch)] = f[15];
hB[buff_mem(13,x,y,pitch)] = f[16];
hB[buff_mem(14,x,y,pitch)] = f[9 ];
hB[buff_mem(15,x,y,pitch)] = f[12];
hB[buff_mem(16,x,y,pitch)] = f[13];
hB[buff_mem(17,x,y,pitch)] = f[10];
hB[buff_mem(18,x,y,pitch)] = f[11];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
if(im == 100)//north outlet
{
for(int i = 0; i<19; i++)
f[i ]= hA[buff_mem(i ,x,y-1,pitch)];
North_Extrap(f,1.0f);
}
if(im == 200)//south inlet
{
for(int i = 0; i<19; i++)
f[i ]= hA[buff_mem(i ,x,y+1,pitch)];
//South_Extrap(f,UMAX);
float u_in = PoisProf3D(x,(GPU+1)*(zInner+2)-1);
South_Extrap(f,u_in);
}
if(im == 300)//east outlet
{
for(int i = 0; i<19; i++)
f[i ]= hA[buff_mem(i ,x-1,y,pitch)];
East_Extrap(f,1.0f);
}
if(im == 400)//west inlet
{
for(int i = 0; i<19; i++)
f[i ]= hA[buff_mem(i ,x+1,y,pitch)];
float u_in = PoisProf3D(y,(GPU+1)*(zInner+2)-1);
West_Extrap(f,u_in);
}
if(im == 25)
xsymmetry_top(f,y,(GPU+1)*(zInner+2)-1);
if(im == 26)
xsymmetry_bot(f,y,(GPU+1)*(zInner+2)-1);
mrt_collide(f,omega);
for(int i = 0; i<19; i++)
hB[buff_mem(i ,x,y,pitch)] = f[i ];
}
if(REFINEMENT == 1){
if(x>=int(LRX0)&&x<=int(LRX0+XLRDIM*LRFACTOR)&&y>=int(LRY0)&&y<=int(LRY0+YLRDIM*LRFACTOR))
{
// if(x>int(LRX0+2)&&x<int(LRX0+XLRDIM*LRFACTOR-1)&&y>int(LRY0+2)&&y<int(LRY0+YLRDIM*LRFACTOR-1))
// {
// //do nothing
// }
// else{
// //float rho,u,v,w,m9,m11,m13,m14,m15;
float mom[9];
PhysicalMoments(mom,f);
for(int i = 0; i<9; i++)
h_interp[buff_mem_interp(i,x-int(LRX0),y-int(LRY0),pitch_interp,zInner)]=mom[i];
// }
}
}
	__syncthreads();
if(check[0] == 1){
	//tree reduction for force (assumes blockDim.x is a power of two)
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
		__syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[t-STARTF],sumX[0]);
atomicAdd(&FY[t-STARTF],sumY[0]);
atomicAdd(&FZ[t-STARTF],sumZ[0]);
}
}
}
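/*
update_bot is the mirror of update_top for the bottom buffer slice: the
upward-moving populations f[9..13] arrive from temp (the device below) and
the downward-moving populations f[14..18] from the first interior slice of fA.
*/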
__global__ void update_bot(float* gB, float* gA, float* fA, float* temp,
float omega, size_t pitch, int GPU, int zInner, float* FX, float* FY, float* FZ, int t, int flag_F, float* g_interp, size_t pitch_interp)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int j = x+y*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,GPU*(zInner+2));
float f[19];
__shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
__shared__ int check[1];
check[0] = 0;
	__syncthreads();
f[0 ]= gA [j];
f[1 ]= gA [buff_mem(1 ,x-1,y ,pitch)];
f[3 ]= gA [buff_mem(3 ,x+1,y ,pitch)];
f[2 ]= gA [buff_mem(2 ,x ,y-1,pitch)];
f[5 ]= gA [buff_mem(5 ,x-1,y-1,pitch)];
f[6 ]= gA [buff_mem(6 ,x+1,y-1,pitch)];
f[4 ]= gA [buff_mem(4 ,x ,y+1,pitch)];
f[7 ]= gA [buff_mem(7 ,x+1,y+1,pitch)];
f[8 ]= gA [buff_mem(8 ,x-1,y+1,pitch)];
f[9 ]= temp[buff_mem(9 ,x ,y ,pitch)];
f[10]= temp[buff_mem(10,x-1,y ,pitch)];
f[11]= temp[buff_mem(11,x ,y-1,pitch)];
f[12]= temp[buff_mem(12,x+1,y ,pitch)];
f[13]= temp[buff_mem(13,x ,y+1,pitch)];
f[14]= fA [f_mem (14,x ,y ,0,pitch, zInner)];
f[15]= fA [f_mem (15,x-1,y ,0,pitch, zInner)];
f[16]= fA [f_mem (16,x ,y-1,0,pitch, zInner)];
f[17]= fA [f_mem (17,x+1,y ,0,pitch, zInner)];
f[18]= fA [f_mem (18,x ,y+1,0,pitch, zInner)];
if(im == 1 || im ==10){//BB
if(im == 10 && flag_F == 1){
check[0] = 1;
sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6];
sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17];
sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6];
sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18];
sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13];
sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
gB[buff_mem(0 ,x,y,pitch)] = f[0 ];
gB[buff_mem(1 ,x,y,pitch)] = f[3 ];
gB[buff_mem(2 ,x,y,pitch)] = f[4 ];
gB[buff_mem(3 ,x,y,pitch)] = f[1 ];
gB[buff_mem(4 ,x,y,pitch)] = f[2 ];
gB[buff_mem(5 ,x,y,pitch)] = f[7 ];
gB[buff_mem(6 ,x,y,pitch)] = f[8 ];
gB[buff_mem(7 ,x,y,pitch)] = f[5 ];
gB[buff_mem(8 ,x,y,pitch)] = f[6 ];
gB[buff_mem(9 ,x,y,pitch)] = f[14];
gB[buff_mem(10,x,y,pitch)] = f[17];
gB[buff_mem(11,x,y,pitch)] = f[18];
gB[buff_mem(12,x,y,pitch)] = f[15];
gB[buff_mem(13,x,y,pitch)] = f[16];
gB[buff_mem(14,x,y,pitch)] = f[9 ];
gB[buff_mem(15,x,y,pitch)] = f[12];
gB[buff_mem(16,x,y,pitch)] = f[13];
gB[buff_mem(17,x,y,pitch)] = f[10];
gB[buff_mem(18,x,y,pitch)] = f[11];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
if(im == 100)//north outlet
{
for(int i = 0; i<19; i++)
f[i ]= gA[buff_mem(i ,x,y-1,pitch)];
North_Extrap(f,1.0f);
}
if(im == 200)//south inlet
{
for(int i = 0; i<19; i++)
f[i ]= gA[buff_mem(i ,x,y+1,pitch)];
//South_Extrap(f,UMAX);
float u_in = PoisProf3D(x,GPU*(zInner+2));
South_Extrap(f,u_in);
}
if(im == 300)//east outlet
{
for(int i = 0; i<19; i++)
f[i ]= gA[buff_mem(i ,x-1,y,pitch)];
East_Extrap(f,1.0f);
}
if(im == 400)//west inlet
{
for(int i = 0; i<19; i++)
f[i ]= gA[buff_mem(i ,x+1,y,pitch)];
float u_in = PoisProf3D(y,GPU*(zInner+2));
West_Extrap(f,u_in);
}
if(im == 25)
xsymmetry_top(f,y,GPU*(zInner+2));
if(im == 26)
xsymmetry_bot(f,y,GPU*(zInner+2));
mrt_collide(f,omega);
for(int i = 0; i<19; i++)
gB[buff_mem(i ,x,y,pitch)] = f[i ];
}
if(REFINEMENT == 1){
if(x>=int(LRX0)&&x<=int(LRX0+XLRDIM*LRFACTOR)&&y>=int(LRY0)&&y<=int(LRY0+YLRDIM*LRFACTOR))
{
// if(x>int(LRX0+2)&&x<int(LRX0+XLRDIM*LRFACTOR-1)&&y>int(LRY0+2)&&y<int(LRY0+YLRDIM*LRFACTOR-1))
// {
// //do nothing
// }
// else{
//float rho,u,v,w,m9,m11,m13,m14,m15;
float mom[9];
PhysicalMoments(mom,f);
for(int i = 0; i<9; i++)
g_interp[buff_mem_interp(i,x-int(LRX0),y-int(LRY0),pitch_interp,zInner)]=mom[i];
// }
}
}
	__syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
		__syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[t-STARTF],sumX[0]);
atomicAdd(&FY[t-STARTF],sumY[0]);
atomicAdd(&FZ[t-STARTF],sumZ[0]);
}
}
}
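/*
update_inn advances the zInner interior slices. Interior nodes stream entirely
within fA; the first slice (z==0) pulls its upward-moving populations from the
bottom buffer g, and the last slice (z==zInner-1) pulls its downward-moving
populations from the top buffer h. Velocity averages and fluctuations are
accumulated here when VELAV is enabled.
*/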
__global__ void update_inn(float* fB, float* fA, float* g, float* h, float omega, size_t pitch, int GPU, int zInner, float* velAv_u, float* velAv_v, float* velFluc_u, float* velFluc_v, float* FX, float* FY, float* FZ, int t, int flag_F, float* f_interp, size_t pitch_interp)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,GPU*(zInner+2)+1+z);
float f[19];
__shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
__shared__ int check[1];
check[0] = 0;
	__syncthreads();
f[ 0] = fA[j];
f[ 1] = fA[f_mem (1 ,x-1,y ,z ,pitch, zInner)];
f[ 3] = fA[f_mem (3 ,x+1,y ,z ,pitch, zInner)];
f[ 2] = fA[f_mem (2 ,x ,y-1,z ,pitch, zInner)];
f[ 5] = fA[f_mem (5 ,x-1,y-1,z ,pitch, zInner)];
f[ 6] = fA[f_mem (6 ,x+1,y-1,z ,pitch, zInner)];
f[ 4] = fA[f_mem (4 ,x ,y+1,z ,pitch, zInner)];
f[ 7] = fA[f_mem (7 ,x+1,y+1,z ,pitch, zInner)];
f[ 8] = fA[f_mem (8 ,x-1,y+1,z ,pitch, zInner)];
if(z==zInner-1){//top nodes need info from h
f[ 9] = fA[f_mem (9 ,x ,y ,z-1,pitch, zInner)];
f[10]= fA[f_mem (10,x-1,y ,z-1,pitch, zInner)];
f[11]= fA[f_mem (11,x ,y-1,z-1,pitch, zInner)];
f[12]= fA[f_mem (12,x+1,y ,z-1,pitch, zInner)];
f[13]= fA[f_mem (13,x ,y+1,z-1,pitch, zInner)];
f[14]= h [buff_mem(14,x ,y ,pitch)];
f[15]= h [buff_mem(15,x-1,y ,pitch)];
f[16]= h [buff_mem(16,x ,y-1,pitch)];
f[17]= h [buff_mem(17,x+1,y ,pitch)];
f[18]= h [buff_mem(18,x ,y+1,pitch)];
}
else if(z==0){//bottom nodes need info from g
f[ 9] =g [buff_mem(9 ,x ,y ,pitch)];
f[10]= g [buff_mem(10,x-1,y ,pitch)];
f[11]= g [buff_mem(11,x ,y-1,pitch)];
f[12]= g [buff_mem(12,x+1,y ,pitch)];
f[13]= g [buff_mem(13,x ,y+1,pitch)];
f[14]= fA[f_mem (14,x ,y ,z+1,pitch, zInner)];
f[15]= fA[f_mem (15,x-1,y ,z+1,pitch, zInner)];
f[16]= fA[f_mem (16,x ,y-1,z+1,pitch, zInner)];
f[17]= fA[f_mem (17,x+1,y ,z+1,pitch, zInner)];
f[18]= fA[f_mem (18,x ,y+1,z+1,pitch, zInner)];
}
else{//normal nodes
f[ 9] = fA[f_mem(9 ,x ,y ,z-1,pitch,zInner)];
f[10]= fA[f_mem(10,x-1,y ,z-1,pitch,zInner)];
f[11]= fA[f_mem(11,x ,y-1,z-1,pitch,zInner)];
f[12]= fA[f_mem(12,x+1,y ,z-1,pitch,zInner)];
f[13]= fA[f_mem(13,x ,y+1,z-1,pitch,zInner)];
f[14]= fA[f_mem(14,x ,y ,z+1,pitch,zInner)];
f[15]= fA[f_mem(15,x-1,y ,z+1,pitch,zInner)];
f[16]= fA[f_mem(16,x ,y-1,z+1,pitch,zInner)];
f[17]= fA[f_mem(17,x+1,y ,z+1,pitch,zInner)];
f[18]= fA[f_mem(18,x ,y+1,z+1,pitch,zInner)];
}//end normal nodes
if(im == 1 || im ==10){//BB
if(im == 10 && flag_F == 1){
check[0] = 1;
sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6];
sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17];
sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6];
sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18];
sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13];
sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
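	//bounce-back: each population is written back along its opposite
	//direction; the rest population f[ 0] is its own image and never changes
	//at solid nodes, so it is not rewritten here.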
fB[f_mem(1 ,x,y,z,pitch,zInner)] = f[ 3] ;
fB[f_mem(2 ,x,y,z,pitch,zInner)] = f[ 4] ;
fB[f_mem(3 ,x,y,z,pitch,zInner)] = f[ 1] ;
fB[f_mem(4 ,x,y,z,pitch,zInner)] = f[ 2] ;
fB[f_mem(5 ,x,y,z,pitch,zInner)] = f[ 7] ;
fB[f_mem(6 ,x,y,z,pitch,zInner)] = f[ 8] ;
fB[f_mem(7 ,x,y,z,pitch,zInner)] = f[ 5] ;
fB[f_mem(8 ,x,y,z,pitch,zInner)] = f[ 6] ;
fB[f_mem(9 ,x,y,z,pitch,zInner)] = f[14];
fB[f_mem(10,x,y,z,pitch,zInner)] = f[17];
fB[f_mem(11,x,y,z,pitch,zInner)] = f[18];
fB[f_mem(12,x,y,z,pitch,zInner)] = f[15];
fB[f_mem(13,x,y,z,pitch,zInner)] = f[16];
fB[f_mem(14,x,y,z,pitch,zInner)] = f[ 9] ;
fB[f_mem(15,x,y,z,pitch,zInner)] = f[12];
fB[f_mem(16,x,y,z,pitch,zInner)] = f[13];
fB[f_mem(17,x,y,z,pitch,zInner)] = f[10];
fB[f_mem(18,x,y,z,pitch,zInner)] = f[11];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
if(im == 100)//north outlet
{
for(int i = 0; i<19; i++)
f[i ]= fA[f_mem(i ,x,y-1,z,pitch,zInner)];
North_Extrap(f,1.0f);
}
if(im == 200)//south inlet
{
for(int i = 0; i<19; i++)
f[i ]= fA[f_mem(i ,x,y+1,z,pitch,zInner)];
//South_Extrap(f,UMAX);
float u_in = PoisProf3D(x,GPU*(zInner+2)+1+z);
South_Extrap(f,u_in);
}
if(im == 300)//east outlet
{
for(int i = 0; i<19; i++)
f[i ]= fA[f_mem(i ,x-1,y,z,pitch,zInner)];
East_Extrap(f,1.0f);
}
if(im == 400)//west inlet
{
for(int i = 0; i<19; i++)
f[i ]= fA[f_mem(i ,x+1,y,z,pitch,zInner)];
float u_in = PoisProf3D(y,GPU*(zInner+2)+1+z);
West_Extrap(f,u_in);
}
if(im == 25)
xsymmetry_top(f,y,GPU*(zInner+2)+1+z);
if(im == 26)
xsymmetry_bot(f,y,GPU*(zInner+2)+1+z);
mrt_collide(f,omega);
if(VELAV == 1){
if(t>=START_VELAV && t<START_VELFLUC){
float u_Av = velAv_u[x+y*pitch+(z+1)*pitch*YDIM];
float v_Av = velAv_v[x+y*pitch+(z+1)*pitch*YDIM];
vel_av(f,u_Av,v_Av,t);
velAv_u[x+y*pitch+(z+1)*pitch*YDIM] = u_Av;
velAv_v[x+y*pitch+(z+1)*pitch*YDIM] = v_Av;
}
else if(t>=START_VELFLUC){
float u_Av = velAv_u[x+y*pitch+(z+1)*pitch*YDIM];
float v_Av = velAv_v[x+y*pitch+(z+1)*pitch*YDIM];
float u_fluc = velFluc_u[x+y*pitch+(z+1)*pitch*YDIM];
float v_fluc = velFluc_v[x+y*pitch+(z+1)*pitch*YDIM];
vel_fluc(f,u_Av,v_Av,u_fluc,v_fluc,t);
velFluc_u[x+y*pitch+(z+1)*pitch*YDIM] = u_fluc;
velFluc_v[x+y*pitch+(z+1)*pitch*YDIM] = v_fluc;
}
}
for(int i = 0; i<19; i++)
fB[f_mem(i ,x,y,z,pitch,zInner)] = f[ i] ;
}
if(REFINEMENT == 1){
if(x>=int(LRX0)&&x<=int(LRX0+XLRDIM*LRFACTOR)&&y>=int(LRY0)&&y<=int(LRY0+YLRDIM*LRFACTOR))
{
// if(x>int(LRX0+2)&&x<int(LRX0+XLRDIM*LRFACTOR-1)&&y>int(LRY0+2)&&y<int(LRY0+YLRDIM*LRFACTOR-1))
// {
// //do nothing
// }
// else{
//float rho,u,v,w,m9,m11,m13,m14,m15;
float mom[9];
PhysicalMoments(mom,f);
for(int i = 0; i<9; i++)
f_interp[f_mem_interp(i,x-int(LRX0),y-int(LRY0),z,pitch_interp,zInner)]=mom[i];
// }
}
}
	__syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
		__syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[t-STARTF],sumX[0]);
atomicAdd(&FY[t-STARTF],sumY[0]);
atomicAdd(&FZ[t-STARTF],sumZ[0]);
}
}
}
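/*
update_top_LR is the fine-mesh counterpart of update_top on the refined grid.
Only bounce-back and plain MRT collision are applied; the macroscopic boundary
types (inlet/outlet/symmetry) are not handled on the fine grid.
*/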
__global__ void update_top_LR(float* hB, float* hA, float* fA, float* temp,
float omega, size_t pitch, int GPU, int zInner, float* FX, float* FY, float* FZ, int t, int flag_F)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = (GPU+1)*(zInner+2)-1;//physical coord in LR region
int j = x+y*pitch;//index on padded mem (pitch in elements)
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+LRFACTOR*z;
int im = ImageFcnLR(xcoord,ycoord,zcoord);
float f[19];
__shared__ float sumX[BLOCKSIZELRX], sumY[BLOCKSIZELRX], sumZ[BLOCKSIZELRX];
__shared__ int check[1];
check[0] = 0;
	__syncthreads();
f[0 ]= hA [j];
f[1 ]= hA [buff_memLR(1 ,x-1,y ,pitch)];
f[3 ]= hA [buff_memLR(3 ,x+1,y ,pitch)];
f[2 ]= hA [buff_memLR(2 ,x ,y-1,pitch)];
f[5 ]= hA [buff_memLR(5 ,x-1,y-1,pitch)];
f[6 ]= hA [buff_memLR(6 ,x+1,y-1,pitch)];
f[4 ]= hA [buff_memLR(4 ,x ,y+1,pitch)];
f[7 ]= hA [buff_memLR(7 ,x+1,y+1,pitch)];
f[8 ]= hA [buff_memLR(8 ,x-1,y+1,pitch)];
f[9 ]= fA [ f_memLR(9 ,x ,y ,zInner-1,pitch, zInner)];
f[10]= fA [ f_memLR(10,x-1,y ,zInner-1,pitch, zInner)];
f[11]= fA [ f_memLR(11,x ,y-1,zInner-1,pitch, zInner)];
f[12]= fA [ f_memLR(12,x+1,y ,zInner-1,pitch, zInner)];
f[13]= fA [ f_memLR(13,x ,y+1,zInner-1,pitch, zInner)];
f[14]= temp[buff_memLR(14,x ,y ,pitch)];
f[15]= temp[buff_memLR(15,x-1,y ,pitch)];
f[16]= temp[buff_memLR(16,x ,y-1,pitch)];
f[17]= temp[buff_memLR(17,x+1,y ,pitch)];
f[18]= temp[buff_memLR(18,x ,y+1,pitch)];
if(im == 1 || im ==10){//BB
if(im == 10 && flag_F == 1){
check[0] = 1;
sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6];
sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17];
sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6];
sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18];
sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13];
sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
hB[buff_memLR(0 ,x,y,pitch)] = f[0 ];
hB[buff_memLR(1 ,x,y,pitch)] = f[3 ];
hB[buff_memLR(2 ,x,y,pitch)] = f[4 ];
hB[buff_memLR(3 ,x,y,pitch)] = f[1 ];
hB[buff_memLR(4 ,x,y,pitch)] = f[2 ];
hB[buff_memLR(5 ,x,y,pitch)] = f[7 ];
hB[buff_memLR(6 ,x,y,pitch)] = f[8 ];
hB[buff_memLR(7 ,x,y,pitch)] = f[5 ];
hB[buff_memLR(8 ,x,y,pitch)] = f[6 ];
hB[buff_memLR(9 ,x,y,pitch)] = f[14];
hB[buff_memLR(10,x,y,pitch)] = f[17];
hB[buff_memLR(11,x,y,pitch)] = f[18];
hB[buff_memLR(12,x,y,pitch)] = f[15];
hB[buff_memLR(13,x,y,pitch)] = f[16];
hB[buff_memLR(14,x,y,pitch)] = f[9 ];
hB[buff_memLR(15,x,y,pitch)] = f[12];
hB[buff_memLR(16,x,y,pitch)] = f[13];
hB[buff_memLR(17,x,y,pitch)] = f[10];
hB[buff_memLR(18,x,y,pitch)] = f[11];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
mrt_collide(f,omega);
for(int i = 0; i<19; i++)
hB[buff_memLR(i ,x,y,pitch)] = f[i ];
}
	__syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
		__syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[t-STARTF],sumX[0]);
atomicAdd(&FY[t-STARTF],sumY[0]);
atomicAdd(&FZ[t-STARTF],sumZ[0]);
}
}
}
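//Fine-mesh counterpart of update_bot (bottom buffer slice of the refined grid).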
__global__ void update_bot_LR(float* gB, float* gA, float* fA, float* temp,
float omega, size_t pitch, int GPU, int zInner, float* FX, float* FY, float* FZ, int t, int flag_F)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = (zInner+2)-1;
int j = x+y*pitch;//index on padded mem (pitch in elements)
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+GPU*LRFACTOR*z;
int im = ImageFcnLR(xcoord,ycoord,zcoord);
float f[19];
__shared__ float sumX[BLOCKSIZELRX], sumY[BLOCKSIZELRX], sumZ[BLOCKSIZELRX];
__shared__ int check[1];
check[0] = 0;
	__syncthreads();
f[0 ]= gA [j];
f[1 ]= gA [buff_memLR(1 ,x-1,y ,pitch)];
f[3 ]= gA [buff_memLR(3 ,x+1,y ,pitch)];
f[2 ]= gA [buff_memLR(2 ,x ,y-1,pitch)];
f[5 ]= gA [buff_memLR(5 ,x-1,y-1,pitch)];
f[6 ]= gA [buff_memLR(6 ,x+1,y-1,pitch)];
f[4 ]= gA [buff_memLR(4 ,x ,y+1,pitch)];
f[7 ]= gA [buff_memLR(7 ,x+1,y+1,pitch)];
f[8 ]= gA [buff_memLR(8 ,x-1,y+1,pitch)];
f[9 ]= temp[buff_memLR(9 ,x ,y ,pitch)];
f[10]= temp[buff_memLR(10,x-1,y ,pitch)];
f[11]= temp[buff_memLR(11,x ,y-1,pitch)];
f[12]= temp[buff_memLR(12,x+1,y ,pitch)];
f[13]= temp[buff_memLR(13,x ,y+1,pitch)];
f[14]= fA [ f_memLR(14,x ,y ,0,pitch, zInner)];
f[15]= fA [ f_memLR(15,x-1,y ,0,pitch, zInner)];
f[16]= fA [ f_memLR(16,x ,y-1,0,pitch, zInner)];
f[17]= fA [ f_memLR(17,x+1,y ,0,pitch, zInner)];
f[18]= fA [ f_memLR(18,x ,y+1,0,pitch, zInner)];
if(im == 1 || im ==10){//BB
if(im == 10 && flag_F == 1){
check[0] = 1;
sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6];
sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17];
sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6];
sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18];
sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13];
sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
gB[buff_memLR(0 ,x,y,pitch)] = f[0 ];
gB[buff_memLR(1 ,x,y,pitch)] = f[3 ];
gB[buff_memLR(2 ,x,y,pitch)] = f[4 ];
gB[buff_memLR(3 ,x,y,pitch)] = f[1 ];
gB[buff_memLR(4 ,x,y,pitch)] = f[2 ];
gB[buff_memLR(5 ,x,y,pitch)] = f[7 ];
gB[buff_memLR(6 ,x,y,pitch)] = f[8 ];
gB[buff_memLR(7 ,x,y,pitch)] = f[5 ];
gB[buff_memLR(8 ,x,y,pitch)] = f[6 ];
gB[buff_memLR(9 ,x,y,pitch)] = f[14];
gB[buff_memLR(10,x,y,pitch)] = f[17];
gB[buff_memLR(11,x,y,pitch)] = f[18];
gB[buff_memLR(12,x,y,pitch)] = f[15];
gB[buff_memLR(13,x,y,pitch)] = f[16];
gB[buff_memLR(14,x,y,pitch)] = f[9 ];
gB[buff_memLR(15,x,y,pitch)] = f[12];
gB[buff_memLR(16,x,y,pitch)] = f[13];
gB[buff_memLR(17,x,y,pitch)] = f[10];
gB[buff_memLR(18,x,y,pitch)] = f[11];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
mrt_collide(f,omega);
for(int i = 0; i<19; i++)
gB[buff_memLR(i ,x,y,pitch)] = f[i ];
}
	__syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
		__syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[t-STARTF],sumX[0]);
atomicAdd(&FY[t-STARTF],sumY[0]);
atomicAdd(&FZ[t-STARTF],sumZ[0]);
}
}
}
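/*
Fine-mesh counterpart of update_inn: interior streaming with g/h halos as in
the coarse kernel, plus LR velocity statistics via vel_avLR/vel_flucLR.
*/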
__global__ void update_inn_LR(float* fB, float* fA, float* g, float* h, float omega, size_t pitch, int GPU, int zInner, float* velAv_u, float* velAv_v, float* velFluc_u, float* velFluc_v, float* FX, float* FY, float* FZ, int t, int flag_F)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
int im = ImageFcnLR(LRX0+LRFACTOR*x,LRY0+LRFACTOR*y,LRZ0+LRFACTOR*(GPU*(zInner+2)+1+z));
float f[19];
__shared__ float sumX[BLOCKSIZELRX], sumY[BLOCKSIZELRX], sumZ[BLOCKSIZELRX];
__shared__ int check[1];
check[0] = 0;
	__syncthreads();
f[ 0] = fA[j];
f[ 1] = fA[f_memLR (1 ,x-1,y ,z ,pitch, zInner)];
f[ 3] = fA[f_memLR (3 ,x+1,y ,z ,pitch, zInner)];
f[ 2] = fA[f_memLR (2 ,x ,y-1,z ,pitch, zInner)];
f[ 5] = fA[f_memLR (5 ,x-1,y-1,z ,pitch, zInner)];
f[ 6] = fA[f_memLR (6 ,x+1,y-1,z ,pitch, zInner)];
f[ 4] = fA[f_memLR (4 ,x ,y+1,z ,pitch, zInner)];
f[ 7] = fA[f_memLR (7 ,x+1,y+1,z ,pitch, zInner)];
f[ 8] = fA[f_memLR (8 ,x-1,y+1,z ,pitch, zInner)];
if(z==zInner-1){//top nodes need info from h
f[ 9] =fA[ f_memLR(9 ,x ,y ,z-1,pitch, zInner)];
f[10]= fA[ f_memLR(10,x-1,y ,z-1,pitch, zInner)];
f[11]= fA[ f_memLR(11,x ,y-1,z-1,pitch, zInner)];
f[12]= fA[ f_memLR(12,x+1,y ,z-1,pitch, zInner)];
f[13]= fA[ f_memLR(13,x ,y+1,z-1,pitch, zInner)];
f[14]= h [buff_memLR(14,x ,y ,pitch)];
f[15]= h [buff_memLR(15,x-1,y ,pitch)];
f[16]= h [buff_memLR(16,x ,y-1,pitch)];
f[17]= h [buff_memLR(17,x+1,y ,pitch)];
f[18]= h [buff_memLR(18,x ,y+1,pitch)];
}
else if(z==0){//bottom nodes need info from g
f[ 9] =g [buff_memLR(9 ,x ,y ,pitch)];
f[10]= g [buff_memLR(10,x-1,y ,pitch)];
f[11]= g [buff_memLR(11,x ,y-1,pitch)];
f[12]= g [buff_memLR(12,x+1,y ,pitch)];
f[13]= g [buff_memLR(13,x ,y+1,pitch)];
f[14]= fA[ f_memLR(14,x ,y ,z+1,pitch, zInner)];
f[15]= fA[ f_memLR(15,x-1,y ,z+1,pitch, zInner)];
f[16]= fA[ f_memLR(16,x ,y-1,z+1,pitch, zInner)];
f[17]= fA[ f_memLR(17,x+1,y ,z+1,pitch, zInner)];
f[18]= fA[ f_memLR(18,x ,y+1,z+1,pitch, zInner)];
}
else{//normal nodes
f[ 9] =fA[f_memLR(9 ,x ,y ,z-1,pitch,zInner)];
f[10]= fA[f_memLR(10,x-1,y ,z-1,pitch,zInner)];
f[11]= fA[f_memLR(11,x ,y-1,z-1,pitch,zInner)];
f[12]= fA[f_memLR(12,x+1,y ,z-1,pitch,zInner)];
f[13]= fA[f_memLR(13,x ,y+1,z-1,pitch,zInner)];
f[14]= fA[f_memLR(14,x ,y ,z+1,pitch,zInner)];
f[15]= fA[f_memLR(15,x-1,y ,z+1,pitch,zInner)];
f[16]= fA[f_memLR(16,x ,y-1,z+1,pitch,zInner)];
f[17]= fA[f_memLR(17,x+1,y ,z+1,pitch,zInner)];
f[18]= fA[f_memLR(18,x ,y+1,z+1,pitch,zInner)];
}//end normal nodes
if(im == 1 || im ==10){//BB
if(im == 10 && flag_F == 1){
check[0] = 1;
sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6];
sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17];
sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6];
sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18];
sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13];
sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
fB[f_memLR(1 ,x,y,z,pitch,zInner)] = f[ 3] ;
fB[f_memLR(2 ,x,y,z,pitch,zInner)] = f[ 4] ;
fB[f_memLR(3 ,x,y,z,pitch,zInner)] = f[ 1] ;
fB[f_memLR(4 ,x,y,z,pitch,zInner)] = f[ 2] ;
fB[f_memLR(5 ,x,y,z,pitch,zInner)] = f[ 7] ;
fB[f_memLR(6 ,x,y,z,pitch,zInner)] = f[ 8] ;
fB[f_memLR(7 ,x,y,z,pitch,zInner)] = f[ 5] ;
fB[f_memLR(8 ,x,y,z,pitch,zInner)] = f[ 6] ;
fB[f_memLR(9 ,x,y,z,pitch,zInner)] = f[14];
fB[f_memLR(10,x,y,z,pitch,zInner)] = f[17];
fB[f_memLR(11,x,y,z,pitch,zInner)] = f[18];
fB[f_memLR(12,x,y,z,pitch,zInner)] = f[15];
fB[f_memLR(13,x,y,z,pitch,zInner)] = f[16];
fB[f_memLR(14,x,y,z,pitch,zInner)] = f[ 9] ;
fB[f_memLR(15,x,y,z,pitch,zInner)] = f[12];
fB[f_memLR(16,x,y,z,pitch,zInner)] = f[13];
fB[f_memLR(17,x,y,z,pitch,zInner)] = f[10];
fB[f_memLR(18,x,y,z,pitch,zInner)] = f[11];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
mrt_collide(f,omega);
if(VELAV == 1){
if(t>=START_VELAV && t<START_VELFLUC){
float u_Av = velAv_u[x+y*pitch+(z+1)*pitch*YLRDIM];
float v_Av = velAv_v[x+y*pitch+(z+1)*pitch*YLRDIM];
vel_avLR(f,u_Av,v_Av,t);
velAv_u[x+y*pitch+(z+1)*pitch*YLRDIM] = u_Av;
velAv_v[x+y*pitch+(z+1)*pitch*YLRDIM] = v_Av;
}
else if(t>=START_VELFLUC){
float u_Av = velAv_u[x+y*pitch+(z+1)*pitch*YLRDIM];
float v_Av = velAv_v[x+y*pitch+(z+1)*pitch*YLRDIM];
float u_fluc = velFluc_u[x+y*pitch+(z+1)*pitch*YLRDIM];
float v_fluc = velFluc_v[x+y*pitch+(z+1)*pitch*YLRDIM];
vel_flucLR(f,u_Av,v_Av,u_fluc,v_fluc,t);
velFluc_u[x+y*pitch+(z+1)*pitch*YLRDIM] = u_fluc;
velFluc_v[x+y*pitch+(z+1)*pitch*YLRDIM] = v_fluc;
}
}
for(int i = 0; i<19; i++)
fB[f_memLR(i ,x,y,z,pitch,zInner)] = f[ i] ;
}
	__syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
		__syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[t-STARTF],sumX[0]);
atomicAdd(&FY[t-STARTF],sumY[0]);
atomicAdd(&FZ[t-STARTF],sumZ[0]);
}
}
}
/*
InterpCF is used on the LR grid. It first uses part of its threads to read the
coarse-mesh nodes that completely envelop the fine-mesh nodes and loads their
moments into shared memory. Next, all threads use the shared-memory data to
interpolate and rescale the f's for the fine grid.
*/
__global__ void InterpCF(float* f_f, float* g_f, float* h_f, size_t pitch_f, float* m_f_c, float* m_g_c, float* m_h_c, float* m_g_temp, size_t pitch_m, float SF, float omega_c, int zInner, int zInner_f)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
__shared__ float mom_c[BLOCKSIZEINTERP][2][2][9];
__shared__ float S_c[BLOCKSIZEINTERP][2][2][6];
int GPU = 0;
int im = ImageFcnLR(LRX0+LRFACTOR*x,LRY0+LRFACTOR*y,LRZ0+LRFACTOR*(GPU*(zInner+2)+z));
if(blockIdx.z == 0 && threadIdx.x<ceil(BLOCKSIZEINTERP*LRFACTOR)+1 && threadIdx.z<2 && threadIdx.y<2)
{
//use g and g_temp
int x_c = threadIdx.x+blockIdx.x*BLOCKSIZEINTERP*LRFACTOR;//in coarse grid, blockdim.x is LRX*LRFACTOR
int y_c = threadIdx.y+blockIdx.y;//in coarse grid, blockdim.y is 1
int ymax = YLRDIM*LRFACTOR+1;
if(threadIdx.z == 0){
for(int i = 0; i<9; i++)
mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_g_temp[x_c+y_c*pitch_m+i*ymax*pitch_m];
}
else{
for(int i = 0; i<9; i++)
mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_g_c[x_c+y_c*pitch_m+i*ymax*pitch_m];
}
// float S[6];//float m_strain[9];
// for(int i = 0; i<9; i++)
// m_strain[i] = mom_c[i][threadIdx.x][threadIdx.y][threadIdx.z];
// for(int i = 0; i<6; i++)
// S_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= S[i];
StrainRate(S_c[threadIdx.x][threadIdx.y][threadIdx.z],mom_c[threadIdx.x][threadIdx.y][threadIdx.z],omega_c);
}
else if(blockIdx.z == 1 && threadIdx.x<ceil(BLOCKSIZEINTERP*LRFACTOR)+1 && threadIdx.z<2 && threadIdx.y<2)
{
//use g and f
int x_c = threadIdx.x+blockIdx.x*BLOCKSIZEINTERP*LRFACTOR;//in coarse grid, blockdim.x is LRX*LRFACTOR
int y_c = threadIdx.y+blockIdx.y;//in coarse grid, blockdim.y is 1
int ymax = YLRDIM*LRFACTOR+1;
if(threadIdx.z == 0){
for(int i = 0; i<9; i++)
mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_g_c[x_c+y_c*pitch_m+i*ymax*pitch_m];
}
else{
for(int i = 0; i<9; i++)
mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_f_c[x_c+y_c*pitch_m+i*ymax*pitch_m*zInner];
}
// float S[6];//float m_strain[9];
// for(int i = 0; i<9; i++)
// m_strain[i] = mom_c[i][threadIdx.x][threadIdx.y][threadIdx.z];
// for(int i = 0; i<6; i++)
// S_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= S[i];
StrainRate(S_c[threadIdx.x][threadIdx.y][threadIdx.z],mom_c[threadIdx.x][threadIdx.y][threadIdx.z],omega_c);
}
else if(blockIdx.z == zInner+1 && threadIdx.x<ceil(BLOCKSIZEINTERP*LRFACTOR)+1 && threadIdx.z<2 && threadIdx.y<2)
{
//use h and f
int x_c = threadIdx.x+blockIdx.x*BLOCKSIZEINTERP*LRFACTOR;//in coarse grid, blockdim.x is LRX*LRFACTOR
int y_c = threadIdx.y+blockIdx.y;//in coarse grid, blockdim.y is 1
int ymax = YLRDIM*LRFACTOR+1;
if(threadIdx.z == 0){
for(int i = 0; i<9; i++)
mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_f_c[x_c+y_c*pitch_m+(zInner-1)*ymax*pitch_m+i*ymax*pitch_m*zInner];
}
else{
for(int i = 0; i<9; i++)
mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_h_c[x_c+y_c*pitch_m+i*ymax*pitch_m];
}
// float S[6];//float m_strain[9];
// for(int i = 0; i<9; i++)
// m_strain[i] = mom_c[i][threadIdx.x][threadIdx.y][threadIdx.z];
// for(int i = 0; i<6; i++)
// S_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= S[i];
StrainRate(S_c[threadIdx.x][threadIdx.y][threadIdx.z],mom_c[threadIdx.x][threadIdx.y][threadIdx.z],omega_c);
}
else if(threadIdx.x<ceil(BLOCKSIZEINTERP*LRFACTOR)+1 && threadIdx.z<2 && threadIdx.y<2){//use f only
int x_c = threadIdx.x+blockIdx.x*BLOCKSIZEINTERP*LRFACTOR;//in coarse grid, blockdim.x is LRX*LRFACTOR
int y_c = threadIdx.y+blockIdx.y;//in coarse grid, blockdim.y is 1
int z_c = threadIdx.z+blockIdx.z-2;//in coarse grid, blockdim.z is 1; -2 to account for g and lower halo
int ymax = YLRDIM*LRFACTOR+1;
for(int i = 0; i<9; i++)
mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_f_c[x_c+y_c*pitch_m+z_c*ymax*pitch_m+i*ymax*pitch_m*zInner];
// float S[6];//float m_strain[9];
// for(int i = 0; i<9; i++)
// m_strain[i] = mom_c[i][threadIdx.x][threadIdx.y][threadIdx.z];
// for(int i = 0; i<6; i++)
// S_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= S[i];
StrainRate(S_c[threadIdx.x][threadIdx.y][threadIdx.z],mom_c[threadIdx.x][threadIdx.y][threadIdx.z],omega_c);
}
	__syncthreads();
if(x<LRLEVEL || x>XLRDIM-LRLEVEL-1 || y<LRLEVEL || y>YLRDIM-LRLEVEL-1){
//if(x<LRLEVEL || x>XLRDIM-LRLEVEL-2 || y<LRLEVEL || y>YLRDIM-LRLEVEL-2){
//interpolate from shared mem
int xm = int(threadIdx.x*LRFACTOR+LRFACTOR*0.5f);
int ym = int(threadIdx.y*LRFACTOR+LRFACTOR*0.5f);
int zm = int(threadIdx.z*LRFACTOR+LRFACTOR*0.5f);
int xp = xm+1; //int yp = ym+1; int zp = zm+1;
float xf = (threadIdx.x*LRFACTOR+LRFACTOR*0.5f)-xm;
float yf = (threadIdx.y*LRFACTOR+LRFACTOR*0.5f)-ym;
float zf = (threadIdx.z*LRFACTOR+LRFACTOR*0.5f)-zm;
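	//trilinear interpolation of the 9 physical moments from the 8 surrounding
	//coarse nodes; (xf,yf,zf) are the fractional offsets of the fine node in
	//the coarse cell, i.e. (assuming trilinear_interp implements the standard
	//formula) v = v000*(1-xf)*(1-yf)*(1-zf) + ... + v111*xf*yf*zf.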
float mom[9];
for(int i = 0; i<9; i++){
float v000 = mom_c[xm][0][0][i];
float v001 = mom_c[xp][0][0][i];
float v010 = mom_c[xm][1][0][i];
float v011 = mom_c[xp][1][0][i];
float v100 = mom_c[xm][0][1][i];
float v101 = mom_c[xp][0][1][i];
float v110 = mom_c[xm][1][1][i];
float v111 = mom_c[xp][1][1][i];
mom[i] = trilinear_interp(v000, v001, v010, v011, v100, v101, v110, v111, xf, yf, zf);
}
if(ORDER == 2)
{
float u_x1,u_x2,u_x3,u_x4,u_x5,u_x6,u_x7,u_x8;
float v_y1,v_y2,v_y3,v_y4,v_y5,v_y6,v_y7,v_y8;
float w_z1,w_z2,w_z3,w_z4,w_z5,w_z6,w_z7,w_z8;
float Sxy1,Sxy2,Sxy3,Sxy4,Sxy5,Sxy6,Sxy7,Sxy8;
float Syz1,Syz2,Syz3,Syz4,Syz5,Syz6,Syz7,Syz8;
float Sxz1,Sxz2,Sxz3,Sxz4,Sxz5,Sxz6,Sxz7,Sxz8;
u_x1=S_c[xm][0][0][0];v_y1=S_c[xm][0][0][1];w_z1=S_c[xm][0][0][2];Sxy1=S_c[xm][0][0][3];Syz1=S_c[xm][0][0][4];Sxz1=S_c[xm][0][0][5];
u_x2=S_c[xp][0][0][0];v_y2=S_c[xp][0][0][1];w_z2=S_c[xp][0][0][2];Sxy2=S_c[xp][0][0][3];Syz2=S_c[xp][0][0][4];Sxz2=S_c[xp][0][0][5];
u_x3=S_c[xm][1][0][0];v_y3=S_c[xm][1][0][1];w_z3=S_c[xm][1][0][2];Sxy3=S_c[xm][1][0][3];Syz3=S_c[xm][1][0][4];Sxz3=S_c[xm][1][0][5];
u_x4=S_c[xp][1][0][0];v_y4=S_c[xp][1][0][1];w_z4=S_c[xp][1][0][2];Sxy4=S_c[xp][1][0][3];Syz4=S_c[xp][1][0][4];Sxz4=S_c[xp][1][0][5];
u_x5=S_c[xm][0][1][0];v_y5=S_c[xm][0][1][1];w_z5=S_c[xm][0][1][2];Sxy5=S_c[xm][0][1][3];Syz5=S_c[xm][0][1][4];Sxz5=S_c[xm][0][1][5];
u_x6=S_c[xp][0][1][0];v_y6=S_c[xp][0][1][1];w_z6=S_c[xp][0][1][2];Sxy6=S_c[xp][0][1][3];Syz6=S_c[xp][0][1][4];Sxz6=S_c[xp][0][1][5];
u_x7=S_c[xm][1][1][0];v_y7=S_c[xm][1][1][1];w_z7=S_c[xm][1][1][2];Sxy7=S_c[xm][1][1][3];Syz7=S_c[xm][1][1][4];Sxz7=S_c[xm][1][1][5];
u_x8=S_c[xp][1][1][0];v_y8=S_c[xp][1][1][1];w_z8=S_c[xp][1][1][2];Sxy8=S_c[xp][1][1][3];Syz8=S_c[xp][1][1][4];Sxz8=S_c[xp][1][1][5];
float m03,m05,m07, m13,m15,m17, m23,m25,m27, m33,m35,m37, m43,m45,m47, m53,m55,m57, m63,m65,m67, m73,m75,m77;
m03=mom_c[xm][0][0][1];m05=mom_c[xm][0][0][2];m07=mom_c[xm][0][0][3];
m13=mom_c[xp][0][0][1];m15=mom_c[xp][0][0][2];m17=mom_c[xp][0][0][3];
m23=mom_c[xm][1][0][1];m25=mom_c[xm][1][0][2];m27=mom_c[xm][1][0][3];
m33=mom_c[xp][1][0][1];m35=mom_c[xp][1][0][2];m37=mom_c[xp][1][0][3];
m43=mom_c[xm][0][1][1];m45=mom_c[xm][0][1][2];m47=mom_c[xm][0][1][3];
m53=mom_c[xp][0][1][1];m55=mom_c[xp][0][1][2];m57=mom_c[xp][0][1][3];
m63=mom_c[xm][1][1][1];m65=mom_c[xm][1][1][2];m67=mom_c[xm][1][1][3];
m73=mom_c[xp][1][1][1];m75=mom_c[xp][1][1][2];m77=mom_c[xp][1][1][3];
float cx = -((u_x8-u_x7+u_x6-u_x5+u_x4-u_x3+u_x2-u_x1))*0.03125f;
float cy = -((Sxy8+Sxy7-Sxy6-Sxy5+Sxy4+Sxy3-Sxy2-Sxy1)-m75+m65+m55-m45-m35+m25+m15-m05)*0.0625f;
float cz = -((Sxz8+Sxz7+Sxz6+Sxz5-Sxz4-Sxz3-Sxz2-Sxz1)-m77+m67-m57+m47+m37-m27+m17-m07)*0.0625f;
float dx = -((Sxy8-Sxy7+Sxy6-Sxy5+Sxy4-Sxy3+Sxy2-Sxy1)-m73+m63+m53-m43-m33+m23+m13-m03)*0.0625f;
float dy = -((v_y8+v_y7-v_y6-v_y5+v_y4+v_y3-v_y2-v_y1))*0.03125f;
float dz = -((Syz8+Syz7+Syz6+Syz5-Syz4-Syz3-Syz2-Syz1)-m77-m67+m57+m47+m37+m27-m17-m07)*0.0625f;
float ex = -((Sxz8-Sxz7+Sxz6-Sxz5+Sxz4-Sxz3+Sxz2-Sxz1)-m73+m63-m53+m43+m33-m23+m13-m03)*0.0625f;
float ey = -((Syz8+Syz7-Syz6-Syz5+Syz4+Syz3-Syz2-Syz1)-m75-m65+m55+m45+m35+m25-m15-m05)*0.0625f;
float ez = -((w_z8+w_z7+w_z6+w_z5-w_z4-w_z3-w_z2-w_z1))*0.03125f;
float xpr = 4.f*xf*xf-4.f*xf+1.f;
float ypr = 4.f*yf*yf-4.f*yf+1.f;
float zpr = 4.f*zf*zf-4.f*zf+1.f;
mom[1] += cx*(1.f-xpr)+cy*(1.f-ypr)+cz*(1.f-zpr);
mom[2] += dx*(1.f-xpr)+dy*(1.f-ypr)+dz*(1.f-zpr);
mom[3] += ex*(1.f-xpr)+ey*(1.f-ypr)+ez*(1.f-zpr);
}
float f[19];
InvertPhysicalMoments(f,mom,SF);
if(im != 1 && im != 10){
if(z==0){
for(int i = 0; i<19; i++){
g_f[buff_memLR(i,x,y,pitch_f)]=f[i];
}
}
else if(z==gridDim.z*blockDim.z-1){
for(int i = 0; i<19; i++){
h_f[buff_memLR(i,x,y,pitch_f)]=f[i];
}
}
else{
for(int i = 0; i<19; i++){
f_f[f_memLR(i,x,y,z-1,pitch_f,zInner_f)]=f[i];
}
}
}
}
}
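/*
InterpFC is the reverse coupling: inside the LR region it gathers the f's of
the 8 fine-mesh nodes surrounding each coarse node, converts them to physical
moments (plus strain rates for the ORDER==2 gradient correction), averages
them, and reconstructs the coarse f's with the scale factor SF via
InvertPhysicalMoments before writing to f_c/g_c/h_c.
*/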
__global__ void InterpFC(float* f_c, float* g_c, float* h_c, float* f_f, float* h_f, float* temp_f, size_t pitch_c, size_t pitch_f, float SF, float omega_f, int zInner, int zInner_f)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
//if( (x > LRX0+1 && x < LRX0+XLRDIM*LRFACTOR-1 && y > LRY0+1 && y < LRY0+YLRDIM*LRFACTOR-1) &&
//(x == int(LRX0+2) || x == int(LRX0+XLRDIM*LRFACTOR-2) || y == int(LRY0+2) || y == int(LRY0+YLRDIM*LRFACTOR-2)))
//(true))
if( (x > LRX0+1 && x < LRX0+XLRDIM*LRFACTOR-2 && y > LRY0+1 && y < LRY0+YLRDIM*LRFACTOR-2) &&
//(x == int(LRX0+2) || x == int(LRX0+XLRDIM*LRFACTOR-2) || y == int(LRY0+2) || y == int(LRY0+YLRDIM*LRFACTOR-2)))
(true))
{
float f[19];
float mom[8][9];//physical moments of 8 neighboring nodes
float S_f[8][6];//strain rate tensor of 8 neighboring nodes
int xm = LRLEVEL*(x-LRX0);
int ym = LRLEVEL*(y-LRY0);
int zm = LRLEVEL*(z-(-(1.f-0.5f*LRFACTOR)))-1;//LRZ0=-(1.f-0.5f*LRFACTOR), and -1 to account for g_LR
int xp = xm+1;
int yp = ym+1;
int zp = zm+1;
//top nodes. interp between h and h_temp. output to h
if(z == zInner+1)
{
for(int i = 0; i<19; i++)
f[i] = temp_f[buff_memLR(i,xm,ym,pitch_f)];
PhysicalMoments(mom[0],f);
StrainRate(S_f[0],mom[0],omega_f);
for(int i = 0; i<19; i++)
f[i] = temp_f[buff_memLR(i,xp,ym,pitch_f)];
PhysicalMoments(mom[1],f);
StrainRate(S_f[1],mom[1],omega_f);
for(int i = 0; i<19; i++)
f[i] = temp_f[buff_memLR(i,xm,yp,pitch_f)];
PhysicalMoments(mom[2],f);
StrainRate(S_f[2],mom[2],omega_f);
for(int i = 0; i<19; i++)
f[i] = temp_f[buff_memLR(i,xp,yp,pitch_f)];
PhysicalMoments(mom[3],f);
StrainRate(S_f[3],mom[3],omega_f);
for(int i = 0; i<19; i++)
f[i] = h_f[buff_memLR(i,xm,ym,pitch_f)];
PhysicalMoments(mom[4],f);
StrainRate(S_f[4],mom[4],omega_f);
for(int i = 0; i<19; i++)
f[i] = h_f[buff_memLR(i,xp,ym,pitch_f)];
PhysicalMoments(mom[5],f);
StrainRate(S_f[5],mom[5],omega_f);
for(int i = 0; i<19; i++)
f[i] = h_f[buff_memLR(i,xm,yp,pitch_f)];
PhysicalMoments(mom[6],f);
StrainRate(S_f[6],mom[6],omega_f);
for(int i = 0; i<19; i++)
f[i] = h_f[buff_memLR(i,xp,yp,pitch_f)];
PhysicalMoments(mom[7],f);
StrainRate(S_f[7],mom[7],omega_f);
}
//inner nodes. output to g or f
else{
for(int i = 0; i<19; i++)
f[i] = f_f[f_memLR(i,xm,ym,zm,pitch_f,zInner_f)];
PhysicalMoments(mom[0],f);
StrainRate(S_f[0],mom[0],omega_f);
for(int i = 0; i<19; i++)
f[i] = f_f[f_memLR(i,xp,ym,zm,pitch_f,zInner_f)];
PhysicalMoments(mom[1],f);
StrainRate(S_f[1],mom[1],omega_f);
for(int i = 0; i<19; i++)
f[i] = f_f[f_memLR(i,xm,yp,zm,pitch_f,zInner_f)];
PhysicalMoments(mom[2],f);
StrainRate(S_f[2],mom[2],omega_f);
for(int i = 0; i<19; i++)
f[i] = f_f[f_memLR(i,xp,yp,zm,pitch_f,zInner_f)];
PhysicalMoments(mom[3],f);
StrainRate(S_f[3],mom[3],omega_f);
for(int i = 0; i<19; i++)
f[i] = f_f[f_memLR(i,xm,ym,zp,pitch_f,zInner_f)];
PhysicalMoments(mom[4],f);
StrainRate(S_f[4],mom[4],omega_f);
for(int i = 0; i<19; i++)
f[i] = f_f[f_memLR(i,xp,ym,zp,pitch_f,zInner_f)];
PhysicalMoments(mom[5],f);
StrainRate(S_f[5],mom[5],omega_f);
for(int i = 0; i<19; i++)
f[i] = f_f[f_memLR(i,xm,yp,zp,pitch_f,zInner_f)];
PhysicalMoments(mom[6],f);
StrainRate(S_f[6],mom[6],omega_f);
for(int i = 0; i<19; i++)
f[i] = f_f[f_memLR(i,xp,yp,zp,pitch_f,zInner_f)];
PhysicalMoments(mom[7],f);
StrainRate(S_f[7],mom[7],omega_f);
}
if(ORDER == 1){
for(int i = 0; i<9; i++)
mom[0][i] = 0.125f*(mom[0][i]+mom[1][i]+mom[2][i]+mom[3][i]+mom[4][i]+mom[5][i]+mom[6][i]+mom[7][i]);
}
else if(ORDER == 2)
{
float u_x1,u_x2,u_x3,u_x4,u_x5,u_x6,u_x7,u_x8;
float v_y1,v_y2,v_y3,v_y4,v_y5,v_y6,v_y7,v_y8;
float w_z1,w_z2,w_z3,w_z4,w_z5,w_z6,w_z7,w_z8;
float Sxy1,Sxy2,Sxy3,Sxy4,Sxy5,Sxy6,Sxy7,Sxy8;
float Syz1,Syz2,Syz3,Syz4,Syz5,Syz6,Syz7,Syz8;
float Sxz1,Sxz2,Sxz3,Sxz4,Sxz5,Sxz6,Sxz7,Sxz8;
u_x1=S_f[0][0];v_y1=S_f[0][1];w_z1=S_f[0][2];Sxy1=S_f[0][3];Syz1=S_f[0][4];Sxz1=S_f[0][5];
u_x2=S_f[1][0];v_y2=S_f[1][1];w_z2=S_f[1][2];Sxy2=S_f[1][3];Syz2=S_f[1][4];Sxz2=S_f[1][5];
u_x3=S_f[2][0];v_y3=S_f[2][1];w_z3=S_f[2][2];Sxy3=S_f[2][3];Syz3=S_f[2][4];Sxz3=S_f[2][5];
u_x4=S_f[3][0];v_y4=S_f[3][1];w_z4=S_f[3][2];Sxy4=S_f[3][3];Syz4=S_f[3][4];Sxz4=S_f[3][5];
u_x5=S_f[4][0];v_y5=S_f[4][1];w_z5=S_f[4][2];Sxy5=S_f[4][3];Syz5=S_f[4][4];Sxz5=S_f[4][5];
u_x6=S_f[5][0];v_y6=S_f[5][1];w_z6=S_f[5][2];Sxy6=S_f[5][3];Syz6=S_f[5][4];Sxz6=S_f[5][5];
u_x7=S_f[6][0];v_y7=S_f[6][1];w_z7=S_f[6][2];Sxy7=S_f[6][3];Syz7=S_f[6][4];Sxz7=S_f[6][5];
u_x8=S_f[7][0];v_y8=S_f[7][1];w_z8=S_f[7][2];Sxy8=S_f[7][3];Syz8=S_f[7][4];Sxz8=S_f[7][5];
float m03,m05,m07, m13,m15,m17, m23,m25,m27, m33,m35,m37, m43,m45,m47, m53,m55,m57, m63,m65,m67, m73,m75,m77;
m03=mom[0][1];m05=mom[0][2];m07=mom[0][3];
m13=mom[1][1];m15=mom[1][2];m17=mom[1][3];
m23=mom[2][1];m25=mom[2][2];m27=mom[2][3];
m33=mom[3][1];m35=mom[3][2];m37=mom[3][3];
m43=mom[4][1];m45=mom[4][2];m47=mom[4][3];
m53=mom[5][1];m55=mom[5][2];m57=mom[5][3];
m63=mom[6][1];m65=mom[6][2];m67=mom[6][3];
m73=mom[7][1];m75=mom[7][2];m77=mom[7][3];
float cx = -((u_x8-u_x7+u_x6-u_x5+u_x4-u_x3+u_x2-u_x1))*0.03125f;
float cy = -((Sxy8+Sxy7-Sxy6-Sxy5+Sxy4+Sxy3-Sxy2-Sxy1)-m75+m65+m55-m45-m35+m25+m15-m05)*0.0625f;
float cz = -((Sxz8+Sxz7+Sxz6+Sxz5-Sxz4-Sxz3-Sxz2-Sxz1)-m77+m67-m57+m47+m37-m27+m17-m07)*0.0625f;
float dx = -((Sxy8-Sxy7+Sxy6-Sxy5+Sxy4-Sxy3+Sxy2-Sxy1)-m73+m63+m53-m43-m33+m23+m13-m03)*0.0625f;
float dy = -((v_y8+v_y7-v_y6-v_y5+v_y4+v_y3-v_y2-v_y1))*0.03125f;
float dz = -((Syz8+Syz7+Syz6+Syz5-Syz4-Syz3-Syz2-Syz1)-m77-m67+m57+m47+m37+m27-m17-m07)*0.0625f;
float ex = -((Sxz8-Sxz7+Sxz6-Sxz5+Sxz4-Sxz3+Sxz2-Sxz1)-m73+m63-m53+m43+m33-m23+m13-m03)*0.0625f;
float ey = -((Syz8+Syz7-Syz6-Syz5+Syz4+Syz3-Syz2-Syz1)-m75-m65+m55+m45+m35+m25-m15-m05)*0.0625f;
float ez = -((w_z8+w_z7+w_z6+w_z5-w_z4-w_z3-w_z2-w_z1))*0.03125f;
for(int i = 0; i<9; i++)
mom[0][i] = 0.125f*(mom[0][i]+mom[1][i]+mom[2][i]+mom[3][i]+mom[4][i]+mom[5][i]+mom[6][i]+mom[7][i]);
float xpr = 0.f;//4.f*xf*xf-4.f*xf+1.f;
float ypr = 0.f;//4.f*yf*yf-4.f*yf+1.f;
float zpr = 0.f;//4.f*zf*zf-4.f*zf+1.f;
mom[0][1] += cx*(1.f-xpr)+cy*(1.f-ypr)+cz*(1.f-zpr);
mom[0][2] += dx*(1.f-xpr)+dy*(1.f-ypr)+dz*(1.f-zpr);
mom[0][3] += ex*(1.f-xpr)+ey*(1.f-ypr)+ez*(1.f-zpr);
}
InvertPhysicalMoments(f,mom[0],SF);
//for(int i = 0; i<19; i++) f[i] = 0.1f;
int GPU = 0;
int im = ImageFcn(x,y,GPU*(zInner+2)+z);
if(im != 1 && im != 10){
if(z == 0){
for(int i = 0; i<19; i++)
g_c[buff_mem(i,x,y,pitch_c)]=f[i];
}
else if(z == zInner+1){
for(int i = 0; i<19; i++)
h_c[buff_mem(i,x,y,pitch_c)]=f[i];
}
else{
for(int i = 0; i<19; i++)
f_c[f_mem(i,x,y,z-1,pitch_c,zInner)]=f[i];
}
}
}//end extraction region
}
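/*
WriteResults writes one Tecplot POINT zone per device: the bottom buffer slice
(gin), the zInner interior slices (fin), then the top buffer slice (hin). For
interior nodes the last output column holds the strain-rate magnitude Smag in
place of vfluc.
*/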
void WriteResults(ostream &output, float *fin, float *gin, float *hin, float **velAv,
float **velFluc, float omega, int GPU_N, int GPU)
{
float f[19];
output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"velAv[0]\",\"velAv[1]\",\"ufluc\",\"vfluc\"\n";
output<<"ZONE F=POINT, I="<<XDIM<<", J="<<YDIM<<", K="<<ZDIM/GPU_N<<"\n";
for(int j = 0; j<YDIM; j++){
for(int i = 0; i<XDIM; i++){
float rho = 0;
for(int l = 0; l<19; l++){
f[l] = gin[(i+j*XDIM)+l *XDIM*YDIM];
rho += f[l];
}
float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17];
float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
output<<i<<", "<<j<<", "<<(ZDIM/GPU_N*GPU)<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<velAv[0][i+j*XDIM]<<","<<velAv[1][i+j*XDIM]<<", "<<velFluc[0][i+j*XDIM]<<","<<velFluc[1][i+j*XDIM]<<endl;
}}
for(int k = 1; k<ZDIM/GPU_N-1; k++){
for(int j = 0; j<YDIM; j++){
for(int i = 0; i<XDIM; i++){
float rho = 0;
for(int l = 0; l<19; l++){
f[l] = fin[(i+j*XDIM)+(k-1)*XDIM*YDIM+l*XDIM*YDIM*(ZDIM/GPU_N-2)];
rho += f[l];
}
float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17];
float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
float m1 =-30.f*f[0]+-11.f*f[1]+-11.f*f[2]+-11.f*f[3]+-11.f*f[4]+8.f*f[5]+8.f*f[6]+8.f*f[7]+8.f*f[8]+-11.f*f[9]+8.f*f[10]+8.f*f[11]+8.f*f[12]+8.f*f[13]+-11.f*f[14]+8.f*f[15]+8.f*f[16]+8.f*f[17]+8.f*f[18];
//float m6 = -4.f*f[2]+4.f*f[4]+f[5]+f[6]+-f[7]+-f[8]+f[11]+-f[13]+f[16]+-f[18];
float m10 =-4.f*f[1]+2.f*f[2]+-4.f*f[3]+2.f*f[4]+f[5]+f[6]+f[7]+f[8]+2.f*f[9]+f[10]+-2.f*f[11]+f[12]+-2.f*f[13]+2.f*f[14]+f[15]+-2.f*f[16]+f[17]+-2.f*f[18];
float m16 = f[5]+-f[6]+-f[7]+f[8]-f[10]+f[12]+-f[15]+f[17];
float m[19] = {0};
Moments_host(f,m);
float omega = 1.0f/(3.0f*(UMAX*OBSTR1*2.f/RE)+0.5f);
//float omega2 = 2.0f/(1.0f+2.0f*(2.0f/omega-1.0f));
float PI11 = -0.026315789f*m[ 1]-0.5f *omega*m[ 9];
float PI22 = -0.026315789f*m[ 1]+0.25f*omega*(m[ 9]-3.0f*m[11]);
float PI33 = -0.026315789f*m[ 1]+0.25f*omega*(m[ 9]+3.0f*m[11]);
float PI12 = -1.5f*omega*m[13];
float PI23 = -1.5f*omega*m[14];
float PI13 = -1.5f*omega*m[15];
//we know Smag on coarse mesh
float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
//InvertMoments_host(f,m);
//u = m[3];
//v = m[5];
//w = m[7];
//m6 = m[6 ];
//m10= m[10];
//m16= m[16];
int z = (ZDIM/GPU_N*GPU+k);
output<<i<<", "<<j<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<velAv[0][i+j*XDIM+k*XDIM*YDIM]<<","<<velAv[1][i+j*XDIM+k*XDIM*YDIM]<<", "
<<velFluc[0][i+j*XDIM+k*XDIM*YDIM]<<","<<Smag<<endl;
//<<velFluc[0][i+j*XDIM+k*XDIM*YDIM]<<","<<velFluc[1][i+j*XDIM+k*XDIM*YDIM]<<endl;
}}}
for(int j = 0; j<YDIM; j++){
for(int i = 0; i<XDIM; i++){
float rho = 0;
for(int l = 0; l<19; l++){
f[l] = hin[(i+j*XDIM)+l *XDIM*YDIM];
rho += f[l];
}
float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17];
float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
output<<i<<", "<<j<<", "<<(ZDIM/GPU_N*(GPU+1)-1)<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
	<<velAv[0][i+j*XDIM+(ZDIM/GPU_N-1)*XDIM*YDIM]<<","<<velAv[1][i+j*XDIM+(ZDIM/GPU_N-1)*XDIM*YDIM]<<", "
	<<velFluc[0][i+j*XDIM+(ZDIM/GPU_N-1)*XDIM*YDIM]<<","<<velFluc[1][i+j*XDIM+(ZDIM/GPU_N-1)*XDIM*YDIM]<<endl;
}}
}
void WriteResultsLR(ofstream &output, float *fin, float *gin, float *hin, float **velAv,
float **velFluc, float omega, int GPU_N, int GPU)
{
float f[19];
output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"velAv[0]\",\"velAv[1]\",\"ufluc\",\"vfluc\"\n";
output<<"ZONE F=POINT, I="<<XLRDIM<<", J="<<YLRDIM<<", K="<<ZLRDIM/GPU_N<<"\n";
for(int j = 0; j<YLRDIM; j++){
for(int i = 0; i<XLRDIM; i++){
float rho = 0;
for(int l = 0; l<19; l++){
f[l] = gin[(i+j*XLRDIM)+l *XLRDIM*YLRDIM];
rho += f[l];
}
float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17];
float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
float x = LRX0+LRFACTOR*i;
float y = LRY0+LRFACTOR*j;
float z = LRZ0+LRFACTOR*(ZLRDIM/GPU_N*GPU);
output<<x<<", "<<y<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<velAv[0][i+j*XLRDIM]<<","<<velAv[1][i+j*XLRDIM]<<", "<<velFluc[0][i+j*XLRDIM]<<","<<velFluc[1][i+j*XLRDIM]<<endl;
}}
for(int k = 1; k<ZLRDIM/GPU_N-1; k++){
for(int j = 0; j<YLRDIM; j++){
for(int i = 0; i<XLRDIM; i++){
float rho = 0;
for(int l = 0; l<19; l++){
f[l] = fin[(i+j*XLRDIM)+(k-1)*XLRDIM*YLRDIM+l*XLRDIM*YLRDIM*(ZLRDIM/GPU_N-2)];
rho += f[l];
}
float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17];
float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
float x = LRX0+LRFACTOR*i;
float y = LRY0+LRFACTOR*j;
float z = LRZ0+LRFACTOR*(ZLRDIM/GPU_N*GPU+k);
float m[19] = {0};
Moments_host(f,m);
float omega = 1.0f/(3.0f*(UMAX*OBSTR1*2.f/RE)+0.5f);
//float omega2 = 2.0f/(1.0f+2.0f*(2.0f/omega-1.0f));
float PI11 = -0.026315789f*m[ 1]-0.5f *omega*m[ 9];
float PI22 = -0.026315789f*m[ 1]+0.25f*omega*(m[ 9]-3.0f*m[11]);
float PI33 = -0.026315789f*m[ 1]+0.25f*omega*(m[ 9]+3.0f*m[11]);
float PI12 = -1.5f*omega*m[13];
float PI23 = -1.5f*omega*m[14];
float PI13 = -1.5f*omega*m[15];
//we know Smag on coarse mesh
float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
output<<x<<", "<<y<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<velAv [0][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<velAv [1][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<", "
<<velFluc[0][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<Smag<<endl;
//<<velFluc[0][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<velFluc[1][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<endl;
}}}
for(int j = 0; j<YLRDIM; j++){
for(int i = 0; i<XLRDIM; i++){
float rho = 0;
for(int l = 0; l<19; l++){
f[l] = hin[(i+j*XLRDIM)+l *XLRDIM*YLRDIM];
rho += f[l];
}
float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17];
float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
float x = LRX0+LRFACTOR*i;
float y = LRY0+LRFACTOR*j;
float z = LRZ0+LRFACTOR*(ZLRDIM/GPU_N*(GPU+1)-1);
output<<x<<", "<<y<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<velAv[0][i+j*XLRDIM+(ZLRDIM/GPU_N-1)*XLRDIM*YLRDIM]<<","<<velAv[1][i+j*XLRDIM+(ZLRDIM/GPU_N-1)*XLRDIM*YLRDIM]<<", "
<<velFluc[0][i+j*XLRDIM+(ZLRDIM/GPU_N-1)*XLRDIM*YLRDIM]<<","<<velFluc[1][i+j*XLRDIM+(ZLRDIM/GPU_N-1)*XLRDIM*YLRDIM]<<endl;
}}
}
void WriteForces(float **F, ofstream &output, int ForceTime, int level)
{
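//normalize by U^2 * projected area (= 0.5*U^2 * 2*OBSTR1*ZDIM with unit density),
//so the written values are drag/lift coefficients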
float ref = UMAX*UMAX*ZDIM*OBSTR1;
if(level > 0)
ref *= LRLEVEL*LRLEVEL;
for(int i = 0; i<ForceTime; i++){
output<<i+STARTF<<", "<<F[0][i]/ref<<", "<<F[1][i]/ref<<", "<<F[2][i]/ref<<endl;
}
}
void WriteInputs(ostream &output, float omega, float omegaLR, int GPU_per_node)
{
output<<"Base domain size \t"<<XDIM<<"x"<<YDIM<<"x"<<ZDIM<<endl;
output<<"Base blocksize: \t"<<BLOCKSIZEX<<"x"<<BLOCKSIZEY<<"x"<<BLOCKSIZEZ<<endl;
output<<"Obst1 location: \t("<<OBSTX1<<","<<OBSTY1<<","<<OBSTZ1<<")"<<endl;
output<<"Obst1 radius: \t"<<OBSTR1<<endl;
output<<"Obst2 location: \t("<<OBSTX2<<","<<OBSTY2<<","<<OBSTZ2<<")"<<endl;
output<<"Obst2 radius: \t"<<OBSTR2<<endl;
output<<"RE: \t"<<RE<<endl;
output<<"UMAX: \t"<<UMAX<<endl;
output<<"omega \t: "<<omega<<endl;
output<<"TMAX: \t"<<TMAX<<endl;
output<<"STARTF: \t"<<STARTF<<endl;
output<<"START_VELAV: \t"<<START_VELAV<<endl;
output<<"START_VELFLUC: \t"<<START_VELFLUC<<endl;
output<<"REFINEMENT: \t"<<REFINEMENT<<endl;
output<<"MODEL: \t"<<MODEL<<endl;
output<<"Smagorinsky LES: \t"<<SmagLES<<endl;
output<<"CS: \t"<<CS<<endl;
output<<"LR domain size \t"<<XLRDIM<<"x"<<YLRDIM<<"x"<<ZLRDIM<<endl;
output<<"LR factor \t"<<LRFACTOR<<endl;
output<<"LR location \t"<<LRX0<<"x"<<LRY0<<"x"<<LRZ0<<endl;
output<<"LR blocksize: \t"<<BLOCKSIZELRX<<"x"<<BLOCKSIZELRY<<"x"<<BLOCKSIZELRZ<<endl;
output<<"omega in LR \t: "<<omegaLR<<endl;
output<<"GPUs per node \t: "<<GPU_per_node<<endl;
}
int main(int argc, char *argv[])
{
int GPU_N; cudaGetDeviceCount(&GPU_N);
cout<<"number of GPUs: "<<GPU_N<<endl;
ofstream output; ofstream outputForce; ofstream outputInputs;
string FileName = CASENAME;
output.open ((FileName+".dat").c_str());
outputForce.open ((FileName+".force").c_str());
outputInputs.open ((FileName+".inputs").c_str());
//size_t memsize, memsize2;
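//round the x-row pitch up to the next power of two so rows stay aligned for
//coalesced global-memory access; pitch is in bytes, pitch_e in floats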
size_t pitch = 2;
while(pitch<XDIM)
pitch=pitch*2;
pitch *= sizeof(float);
size_t pitch_e = pitch/sizeof(float);
cout<<"Pitch (in elements): "<<pitch_e<<endl;
float CharLength = OBSTR1*2.f;
float omega = 1.0f/(3.0f*(UMAX*CharLength/RE)+0.5f);
float omegaLR = 2.0f/(1.0f+2.0f*(2.0f/omega-1.0f));
if(LRFACTOR == 0.25f){
omegaLR = 2.0f/(1.0f+2.0f*(2.0f/omegaLR-1.0f));
}
if(LRFACTOR == 0.125f){
omegaLR = 2.0f/(1.0f+2.0f*(2.0f/omegaLR-1.0f));
omegaLR = 2.0f/(1.0f+2.0f*(2.0f/omegaLR-1.0f));
}
float SF_cf = omega*(1.0f-omegaLR)/((1.0f-omega)*omegaLR/LRFACTOR);
float SF_fc = 1.f/SF_cf;
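//scale factors for the non-equilibrium part of the distributions when passing
//coarse->fine (SF_cf) and fine->coarse (SF_fc), following the usual LBM
//grid-refinement rescaling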
cout<<SF_cf<<endl;
WriteInputs(outputInputs,omega,omegaLR,GPU_N);
WriteInputs(cout,omega,omegaLR,GPU_N);
if(fabs(LRFACTOR-1.f/LRLEVEL)>0.001f && REFINEMENT == 1){
cout<<"LRLEVEL and LRFACTOR don't match! Exiting..."<<endl;
return 1;
}
int zInner = ZDIM/GPU_N-2; //excluding halo
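//the domain is decomposed along z: each GPU owns ZDIM/GPU_N slices; f holds
//the zInner interior slices while g (bottom) and h (top) hold the boundary
//slices exchanged with the neighboring GPUs every step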
int ForceTime = max(0,TMAX-STARTF);
dim3 threads(BLOCKSIZEX, BLOCKSIZEY, BLOCKSIZEZ);
//2 halo layers per GPU (for 2 GPUs)
dim3 grid (((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),(zInner)/BLOCKSIZEZ);
dim3 g_grid(((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),1);
cudaStream_t stream_halo[GPU_N];
cudaStream_t stream_inner[GPU_N];
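//two streams per GPU: stream_halo performs the peer-to-peer halo copies and
//updates the boundary slices while stream_inner updates the interior nodes,
//overlapping communication with computation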
//data pointers as 3D array (GPUxCoord)
float *f_h[GPU_N], *g_h[GPU_N], *h_h[GPU_N];
float *f_d[GPU_N][2], *g_d[GPU_N][2], *h_d[GPU_N][2];
float *g_temp[GPU_N], *h_temp[GPU_N];
float *F_h[GPU_N][3];
float *F_d[GPU_N][3];
float *F_total[3];
float *velAv_h[GPU_N][3],*velFluc_h[GPU_N][3];
float *velAv_d[GPU_N][3],*velFluc_d[GPU_N][3];
for(int i = 0; i<3; i++)
F_total[i] = (float *)malloc(ForceTime*sizeof(float));
for(int i=0;i<3;i++)
for(int j=0;j<(ForceTime);j++)
F_total[i][j] = 0;
//Malloc and Initialize for each GPU
for(int n = 0; n<GPU_N; n++){
f_h [n] = (float *)malloc(XDIM*YDIM*zInner*19*sizeof(float));
g_h [n] = (float *)malloc(XDIM*YDIM* 19*sizeof(float));
h_h [n] = (float *)malloc(XDIM*YDIM* 19*sizeof(float));
for(int i = 0; i<3; i++){
F_h [n][i] = (float *)malloc(ForceTime*sizeof(float));
velAv_h [n][i] = (float *)malloc(XDIM*YDIM*ZDIM/GPU_N*sizeof(float));
velFluc_h[n][i] = (float *)malloc(XDIM*YDIM*ZDIM/GPU_N*sizeof(float));
}
cudaSetDevice(n);
cudaStreamCreate(&stream_halo[n]);
cudaStreamCreate(&stream_inner[n]);
for(int m = 0; m<GPU_N; m++)
if(m != n) cudaDeviceEnablePeerAccess(m,0);
for(int i = 0; i<2; i++){
cudaMalloc((void **) &f_d[n][i], pitch_e*YDIM*zInner*19*sizeof(float));
cudaMalloc((void **) &g_d[n][i], pitch_e*YDIM* 19*sizeof(float));
cudaMalloc((void **) &h_d[n][i], pitch_e*YDIM* 19*sizeof(float));
}
cudaMalloc((void **) & g_temp[n], pitch_e*YDIM* 19*sizeof(float));
cudaMalloc((void **) & h_temp[n], pitch_e*YDIM* 19*sizeof(float));
for(int i = 0; i<3; i++){
cudaMalloc((void **) & F_d [n][i], (ForceTime)*sizeof(float));
cudaMalloc((void **) & velAv_d [n][i], pitch_e*YDIM*ZDIM/GPU_N*sizeof(float));
cudaMalloc((void **) & velFluc_d[n][i], pitch_e*YDIM*ZDIM/GPU_N*sizeof(float));
}
//initialize host f_inner
for (int i = 0; i < XDIM*YDIM*zInner*19; i++)
f_h[n][i] = 0;
//initialize host g,h
for (int i = 0; i < XDIM*YDIM*19; i++){
g_h[n][i] = 0;
h_h[n][i] = 0;
}
for(int i=0;i<3;i++){
for(int j=0;j<(ForceTime);j++)
F_h[n][i][j] = 0;
for (int j = 0; j < XDIM*YDIM*ZDIM/GPU_N; j++){
velAv_h [n][i][j] = 0;
velFluc_h[n][i][j] = 0;
}
}
for(int i = 0; i<2; i++){
cudaMemcpy2D(f_d[n][i],pitch,f_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*zInner*19,cudaMemcpyHostToDevice);
cudaMemcpy2D(g_d[n][i],pitch,g_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM *19,cudaMemcpyHostToDevice);
cudaMemcpy2D(h_d[n][i],pitch,h_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM *19,cudaMemcpyHostToDevice);
}
for(int i = 0; i<3; i++){
cudaMemcpy2D(velAv_d [n][i],pitch,velAv_h [n][i],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*ZDIM/GPU_N,cudaMemcpyHostToDevice);
cudaMemcpy2D(velFluc_d[n][i],pitch,velFluc_h[n][i],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*ZDIM/GPU_N,cudaMemcpyHostToDevice);
cudaMemcpy(F_d[n][i],F_h[n][i],sizeof(float)*(ForceTime),cudaMemcpyHostToDevice);
}
//initialization kernels
for(int i = 0; i<2; i++){
initialize<<< grid,threads>>>(f_d[n][i],pitch_e,zInner,GPU_N);
initialize<<<g_grid,threads>>>(g_d[n][i],pitch_e, 1,GPU_N);
initialize<<<g_grid,threads>>>(h_d[n][i],pitch_e, 1,GPU_N);
}
initialize<<<g_grid,threads>>>(g_temp[n],pitch_e, 1,GPU_N);
initialize<<<g_grid,threads>>>(h_temp[n],pitch_e, 1,GPU_N);
}//end Malloc and Initialize
//data pointers as 3D array (GPUxCoord)
float *f_LR_h[GPU_N], *g_LR_h[GPU_N], *h_LR_h[GPU_N];
float *f_LR_d[GPU_N][2], *g_LR_d[GPU_N][2], *h_LR_d[GPU_N][2];
float *g_LR_temp[GPU_N], *h_LR_temp[GPU_N];
float *velAv_LR_h[GPU_N][3],*velFluc_LR_h[GPU_N][3];
float *velAv_LR_d[GPU_N][3],*velFluc_LR_d[GPU_N][3];
float *f_interp[GPU_N], *g_interp[GPU_N], *h_interp[GPU_N], *g_interp_temp[GPU_N], *h_interp_temp[GPU_N];
float *interp_h[GPU_N];
size_t pitchLR = 2;
while(pitchLR<XLRDIM)
pitchLR=pitchLR*2;
pitchLR = pitchLR*sizeof(float);
size_t pitchLR_e = pitchLR/sizeof(float);
cout<<"LR Pitch (in elements): "<<pitchLR_e<<endl;
size_t pitchInterp = 2;
while(pitchInterp<XLRDIM*LRFACTOR+1)
pitchInterp=pitchInterp*2;
pitchInterp = pitchInterp*sizeof(float);
size_t pitchInterp_e = pitchInterp/sizeof(float);
cout<<"Interp Pitch (in elements): "<<pitchInterp_e<<endl;
int zLRInner = ZLRDIM/GPU_N-2;
dim3 LR_threads(BLOCKSIZELRX, BLOCKSIZELRY, BLOCKSIZELRZ);
dim3 LR_grid(((XLRDIM+BLOCKSIZELRX-1)/BLOCKSIZELRX),((YLRDIM+BLOCKSIZELRY-1)/BLOCKSIZELRY),(zLRInner)/BLOCKSIZELRZ);
dim3 g_LR_grid(((XLRDIM+BLOCKSIZELRX-1)/BLOCKSIZELRX),((YLRDIM+BLOCKSIZELRY-1)/BLOCKSIZELRY),1);
dim3 Interp_threads(BLOCKSIZEINTERP, LRLEVEL, LRLEVEL);
dim3 Interp_grid(((XLRDIM+BLOCKSIZEINTERP-1)/BLOCKSIZEINTERP),((YLRDIM+LRLEVEL-1)/LRLEVEL),ZLRDIM/LRLEVEL/GPU_N);
cout<<((XLRDIM+BLOCKSIZEINTERP-1)/BLOCKSIZEINTERP)<<", "<<((YLRDIM+LRLEVEL-1)/LRLEVEL)<<", "<<ZLRDIM/LRLEVEL/GPU_N<<endl;
dim3 Interp_grid_c(((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),(ZDIM/GPU_N)/BLOCKSIZEZ);
//setup LR
if(REFINEMENT == 1){
for(int n = 0; n<GPU_N; n++){
f_LR_h [n] = (float *)malloc(XLRDIM*YLRDIM*zLRInner*19*sizeof(float));
g_LR_h [n] = (float *)malloc(XLRDIM*YLRDIM* 19*sizeof(float));
h_LR_h [n] = (float *)malloc(XLRDIM*YLRDIM* 19*sizeof(float));
interp_h [n] = (float *)malloc((XLRDIM*LRFACTOR+1)*(YLRDIM*LRFACTOR+1)*zInner*9*sizeof(float));
for(int i = 0; i<3; i++){
velAv_LR_h [n][i] = (float *)malloc(XLRDIM*YLRDIM*ZLRDIM/GPU_N*sizeof(float));
velFluc_LR_h[n][i] = (float *)malloc(XLRDIM*YLRDIM*ZLRDIM/GPU_N*sizeof(float));
}
cudaSetDevice(n);
for(int i = 0; i<2; i++){
cudaMalloc((void **) &f_LR_d[n][i], pitchLR_e*YLRDIM*zLRInner*19*sizeof(float));
cudaMalloc((void **) &g_LR_d[n][i], pitchLR_e*YLRDIM* 19*sizeof(float));
cudaMalloc((void **) &h_LR_d[n][i], pitchLR_e*YLRDIM* 19*sizeof(float));
}
cudaMalloc((void **) & g_LR_temp[n], pitchLR_e*YLRDIM* 19*sizeof(float));
cudaMalloc((void **) & h_LR_temp[n], pitchLR_e*YLRDIM* 19*sizeof(float));
cudaMalloc((void **) & f_interp[n], pitchInterp_e*(YLRDIM*LRFACTOR+1)*zInner*9*sizeof(float));
cudaMalloc((void **) & g_interp[n], pitchInterp_e*(YLRDIM*LRFACTOR+1)*9*sizeof(float));
cudaMalloc((void **) & h_interp[n], pitchInterp_e*(YLRDIM*LRFACTOR+1)*9*sizeof(float));
cudaMalloc((void **) & g_interp_temp[n], pitchInterp_e*(YLRDIM*LRFACTOR+1)*9*sizeof(float));
cudaMalloc((void **) & h_interp_temp[n], pitchInterp_e*(YLRDIM*LRFACTOR+1)*9*sizeof(float));
for(int i = 0; i<3; i++){
cudaMalloc((void **) & velAv_LR_d [n][i], pitchLR_e*YLRDIM*ZLRDIM/GPU_N*sizeof(float));
cudaMalloc((void **) & velFluc_LR_d[n][i], pitchLR_e*YLRDIM*ZLRDIM/GPU_N*sizeof(float));
}
for (int i = 0; i < XLRDIM*YLRDIM*zLRInner*19; i++)
f_LR_h[n][i] = 0;
//initialize host g,h
for (int i = 0; i < XLRDIM*YLRDIM*19; i++){
g_LR_h[n][i] = 0;
h_LR_h[n][i] = 0;
}
for(int i=0;i<3;i++){
for (int j = 0; j < XLRDIM*YLRDIM*ZLRDIM/GPU_N; j++){
velAv_LR_h [n][i][j] = 0;
velFluc_LR_h[n][i][j] = 0;
}
}
for(int i = 0; i<2; i++){
cudaMemcpy2D(f_LR_d[n][i],pitchLR,f_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*zLRInner*19,cudaMemcpyHostToDevice);
cudaMemcpy2D(g_LR_d[n][i],pitchLR,g_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM *19,cudaMemcpyHostToDevice);
cudaMemcpy2D(h_LR_d[n][i],pitchLR,h_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM *19,cudaMemcpyHostToDevice);
}
for(int i = 0; i<3; i++){
cudaMemcpy2D(velAv_LR_d [n][i],pitchLR,velAv_LR_h [n][i],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,cudaMemcpyHostToDevice);
cudaMemcpy2D(velFluc_LR_d[n][i],pitchLR,velFluc_LR_h[n][i],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,cudaMemcpyHostToDevice);
}
//initialization kernels
for(int i = 0; i<2; i++){
initializeLR<<< LR_grid,LR_threads>>>(f_LR_d[n][i],pitchLR_e,zLRInner,GPU_N);
initializeLR<<<g_LR_grid,LR_threads>>>(g_LR_d[n][i],pitchLR_e, 1,GPU_N);
initializeLR<<<g_LR_grid,LR_threads>>>(h_LR_d[n][i],pitchLR_e, 1,GPU_N);
}
initializeLR<<<g_LR_grid,LR_threads>>>(g_LR_temp[n],pitchLR_e, 1,GPU_N);
initializeLR<<<g_LR_grid,LR_threads>>>(h_LR_temp[n],pitchLR_e, 1,GPU_N);
}//end of GPU loop for malloc and initialize for LR
}//end of LR malloc and initialize
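//the coarse-to-fine interpolation kernel is shared-memory bound, so request
//a larger shared-memory partition for it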
cudaFuncSetCacheConfig(InterpCF,cudaFuncCachePreferShared);
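//ping-pong buffer indices: A/B select the coarse-mesh read/write buffers and
//C/D the fine-mesh ones; they are swapped after every (sub)step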
int A = 0; int B = 1; int C = 0; int D = 1;
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
size_t mem_avail, mem_total;
cudaMemGetInfo(&mem_avail,&mem_total);
cout<<"Device memory used for dev"<<n<<" : "<<(mem_total-mem_avail)*pow(10,-9)<<" GB\n";
cout<<"Device memory available for dev"<<n<<" : "<<(mem_avail)*pow(10,-9)<<" GB\n";
}
struct timeval tdr0,tdr1;
double restime;
cudaDeviceSynchronize();
gettimeofday (&tdr0,NULL);
//time loop
for(int t = 0; t<TMAX; t++)
{
//exchange coarse-mesh halo slices with the neighboring GPUs; only the 5 distributions that cross the inter-GPU boundary in each z-direction are transferred
for(int n = 0; n<GPU_N; n++)
cudaMemcpyPeerAsync(&h_temp[n][pitch_e*YDIM*14],n,&g_d[ (n+1)%GPU_N][A][pitch_e*YDIM*14], (n+1)%GPU_N,pitch_e*YDIM*sizeof(float)*5,stream_halo[n]);
for(int n = 0; n<GPU_N; n++)
cudaMemcpyPeerAsync(&g_temp[n][pitch_e*YDIM*9],n,&h_d[abs(n-1)%GPU_N][A][pitch_e*YDIM*9],abs(n-1)%GPU_N,pitch_e*YDIM*sizeof(float)*5,stream_halo[n]);
//compute inner nodes on coarse mesh
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
update_inn<<<grid,threads,0,stream_inner[n]>>>(f_d[n][B],f_d[n][A],g_d[n][A], h_d[n][A],omega,pitch_e,n,zInner,velAv_d[n][0],velAv_d[n][1],velFluc_d[n][0],velFluc_d[n][1],F_d[n][0],F_d[n][1],F_d[n][2],t,(!REFINEMENT&&t>STARTF),f_interp[n],pitchInterp_e);
}
//synchronize halo stream before computing top and bottom nodes
for(int n = 0; n<GPU_N; n++)
cudaStreamSynchronize(stream_halo[n]);
//compute top and bottom nodes
for(int n = 0; n<GPU_N; n++)
{
cudaSetDevice(n);
update_top<<<g_grid, threads, 0, stream_halo [n]>>>(h_d[n][B],h_d[n][A],f_d[n][A],h_temp[n],omega,pitch_e,n,zInner,F_d[n][0],F_d[n][1],F_d[n][2],t,(!REFINEMENT&&t>STARTF),h_interp[n],pitchInterp_e);
update_bot<<<g_grid, threads, 0, stream_halo [n]>>>(g_d[n][B],g_d[n][A],f_d[n][A],g_temp[n],omega,pitch_e,n,zInner,F_d[n][0],F_d[n][1],F_d[n][2],t,(!REFINEMENT&&t>STARTF),g_interp[n],pitchInterp_e);
}
//cudaDeviceSynchronize();
swap(A,B);
if(REFINEMENT == 1){
int flag_F = 0;
for(int i = 0; i<LRLEVEL; i++){
if(t>STARTF && i == 0) flag_F = 1;
else flag_F = 0;
for(int n = 0; n<GPU_N; n++){
cudaMemcpyPeerAsync(&h_LR_temp[n][pitchLR_e*YLRDIM*14],n,&g_LR_d[ (n+1)%GPU_N][C][pitchLR_e*YLRDIM*14], (n+1)%GPU_N,pitchLR_e*YLRDIM*sizeof(float)*5,stream_halo[n]);
cudaMemcpyPeerAsync(&g_LR_temp[n][pitchLR_e*YLRDIM*9 ],n,&h_LR_d[abs(n-1)%GPU_N][C][pitchLR_e*YLRDIM*9 ],abs(n-1)%GPU_N,pitchLR_e*YLRDIM*sizeof(float)*5,stream_halo[n]);
}
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
update_inn_LR<<<LR_grid,LR_threads,0,stream_inner[n]>>>(f_LR_d[n][D],f_LR_d[n][C],g_LR_d[n][C], h_LR_d[n][C],omegaLR,pitchLR_e,n,zLRInner,velAv_LR_d[n][0],velAv_LR_d[n][1],velFluc_LR_d[n][0],velFluc_LR_d[n][1],F_d[n][0],F_d[n][1],F_d[n][2],t,flag_F);
}
for(int n = 0; n<GPU_N; n++)
cudaStreamSynchronize(stream_halo[n]);
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
update_top_LR<<<g_LR_grid,LR_threads,0,stream_halo[n]>>>(h_LR_d[n][D],h_LR_d[n][C],f_LR_d[n][C],h_LR_temp[n],omegaLR,pitchLR_e,n,zLRInner,F_d[n][0],F_d[n][1],F_d[n][2],t,flag_F);
update_bot_LR<<<g_LR_grid,LR_threads,0,stream_halo[n]>>>(g_LR_d[n][D],g_LR_d[n][C],f_LR_d[n][C],g_LR_temp[n],omegaLR,pitchLR_e,n,zLRInner,F_d[n][0],F_d[n][1],F_d[n][2],t,flag_F);
}
if(i == LRLEVEL-1)
{
//for(int n = 0; n<GPU_N; n++)
//	cudaMemcpyPeerAsync(&h_interp_temp[n][0],n,&g_interp[ (n+1)%GPU_N][0], (n+1)%GPU_N,pitchInterp_e*(YLRDIM*LRFACTOR+1)*sizeof(float)*9,stream_halo[n]);
for(int n = 0; n<GPU_N; n++)
cudaMemcpyPeerAsync(&g_interp_temp[n][0],n,&h_interp[abs(n-1)%GPU_N][0],abs(n-1)%GPU_N,pitchInterp_e*(YLRDIM*LRFACTOR+1)*sizeof(float)*9,stream_halo[n]);
}
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
cudaDeviceSynchronize();
}
flag_F = 0;
swap(C,D);
}
//interp from coarse grid
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
InterpCF<<<Interp_grid,Interp_threads,0,stream_inner[n]>>>(f_LR_d[n][C],g_LR_d[n][C],h_LR_d[n][C],pitchLR_e,f_interp[n],g_interp[n],h_interp[n],g_interp_temp[n],pitchInterp_e,SF_cf,omega,zInner,zLRInner);
//cudaDeviceSynchronize();
}
//interp from fine grid
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
cudaMemcpyPeerAsync(&h_LR_temp[n][0],n,&g_LR_d[ (n+1)%GPU_N][C][0], (n+1)%GPU_N,pitchLR_e*YLRDIM*sizeof(float)*19,stream_halo[n]);
}
for(int n = 0; n<GPU_N; n++)
cudaStreamSynchronize(stream_halo[n]);
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
InterpFC<<<Interp_grid_c,threads,0,stream_halo[n]>>>(f_d[n][A],g_d[n][A],h_d[n][A],f_LR_d[n][C],h_LR_d[n][C],h_LR_temp[n],pitch_e,pitchLR_e,SF_fc,omegaLR,zInner,zLRInner);
}
}//end refinement
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
cudaDeviceSynchronize();
}
}//end time loop
cudaDeviceSynchronize();
gettimeofday (&tdr1,NULL);
timeval_subtract (&restime, &tdr1, &tdr0);
int Nodes = XDIM*YDIM*ZDIM;
if (REFINEMENT == 1)
Nodes += XLRDIM*YLRDIM*ZLRDIM*LRLEVEL;
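//MLUPS = million lattice-node updates per second: nodes * time steps / wall time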
cout<<"Time taken for main kernel: "<<restime<<" ("
<<double(Nodes*double(TMAX/1000000.f))/restime<<"MLUPS)\n";
//D2H Memcpy and write results
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
cudaMemcpy2D(f_h[n],XDIM*sizeof(float),f_d[n][A],pitch,XDIM*sizeof(float),YDIM*zInner*19,cudaMemcpyDeviceToHost);
cudaMemcpy2D(g_h[n],XDIM*sizeof(float),g_d[n][A],pitch,XDIM*sizeof(float),YDIM *19,cudaMemcpyDeviceToHost);
cudaMemcpy2D(h_h[n],XDIM*sizeof(float),h_d[n][A],pitch,XDIM*sizeof(float),YDIM *19,cudaMemcpyDeviceToHost);
for(int i = 0; i<3; i++){
cudaMemcpy2D( velAv_h[n][i],XDIM*sizeof(float),velAv_d[n][i],pitch,XDIM*sizeof(float),YDIM*ZDIM/GPU_N,cudaMemcpyDeviceToHost);
cudaMemcpy2D(velFluc_h[n][i],XDIM*sizeof(float),velFluc_d[n][i],pitch,XDIM*sizeof(float),YDIM*ZDIM/GPU_N,cudaMemcpyDeviceToHost);
cudaMemcpy(F_h[n][i],F_d[n][i],sizeof(float)*ForceTime,cudaMemcpyDeviceToHost);
}
WriteResults(output,f_h[n],g_h[n],h_h[n],velAv_h[n],velFluc_h[n],omega,GPU_N,n);
output<<endl;
for(int i=0;i<3;i++)
for(int j=0;j<ForceTime;j++)
F_total[i][j] += F_h[n][i][j];
for(int i = 0; i<2; i++){
cudaFree(f_d[n][i]);
cudaFree(g_d[n][i]);
cudaFree(h_d[n][i]);
}
cudaFree(g_temp[n]);
cudaFree(h_temp[n]);
for(int i=0;i<3;i++)
cudaFree(F_d[n][i]);
}//end Memcpy and write results
WriteForces(F_total,outputForce,ForceTime,REFINEMENT*LRLEVEL);
if(REFINEMENT == 1){
// output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"uAv\",\"vAv\",\"ufluc\",\"vfluc\"\n";
// output<<"ZONE F=POINT, I="<<XLRDIM<<", J="<<YLRDIM<<", K="<<ZLRDIM<<"\n";
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
cudaMemcpy2D(f_LR_h[n],XLRDIM*sizeof(float),f_LR_d[n][C],pitchLR,XLRDIM*sizeof(float),YLRDIM*zLRInner*19,cudaMemcpyDeviceToHost);
cudaMemcpy2D(g_LR_h[n],XLRDIM*sizeof(float),g_LR_d[n][C],pitchLR,XLRDIM*sizeof(float),YLRDIM *19,cudaMemcpyDeviceToHost);
cudaMemcpy2D(h_LR_h[n],XLRDIM*sizeof(float),h_LR_d[n][C],pitchLR,XLRDIM*sizeof(float),YLRDIM *19,cudaMemcpyDeviceToHost);
//cudaMemcpy2D(interp_h[n],(XLRDIM*LRFACTOR+1)*sizeof(float),f_interp[n],pitchInterp,(XLRDIM*LRFACTOR+1)*sizeof(float),(YLRDIM*LRFACTOR+1)*zInner*9,cudaMemcpyDeviceToHost);
for(int i = 0; i<3; i++){
cudaMemcpy2D( velAv_LR_h[n][i],XLRDIM*sizeof(float),velAv_LR_d[n][i],pitchLR,XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,cudaMemcpyDeviceToHost);
cudaMemcpy2D(velFluc_LR_h[n][i],XLRDIM*sizeof(float),velFluc_LR_d[n][i],pitchLR,XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,cudaMemcpyDeviceToHost);
}
WriteResultsLR(output,f_LR_h[n],g_LR_h[n],h_LR_h[n],velAv_LR_h[n],velFluc_LR_h[n],omegaLR,GPU_N,n);
output<<endl;
for(int i = 0; i<2; i++){
cudaFree(f_LR_d[n][i]);
cudaFree(g_LR_d[n][i]);
cudaFree(h_LR_d[n][i]);
}
cudaFree(g_LR_temp[n]);
cudaFree(h_LR_temp[n]);
}
}
return 0;
}
|
7eba9a06948546ff03688d41c067f7e6eb0385a8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <cassert>
#include <chrono>
#include "../constants_bench_3d.h"
#include <hip/hip_runtime.h>
#include <iostream>
#include "../../../../utils.h"
// Dataset
constexpr auto N = mean_shift::cuda::bench_3d::case_5000::N;
constexpr auto D = mean_shift::cuda::bench_3d::D;
constexpr auto M = mean_shift::cuda::bench_3d::M;
const auto PATH_TO_DATA = mean_shift::cuda::bench_3d::case_5000::PATH_TO_DATA;
const auto PATH_TO_CENTROIDS = mean_shift::cuda::bench_3d::case_5000::PATH_TO_CENTROIDS;
const auto LOG_SM = mean_shift::cuda::bench_3d::case_5000::LOG_SM;
// Hyperparams
constexpr auto RADIUS = mean_shift::cuda::bench_3d::case_5000::RADIUS;
constexpr auto NUM_ITER = mean_shift::cuda::bench_3d::NUM_ITER;
constexpr auto DBL_SIGMA_SQ = mean_shift::cuda::bench_3d::case_5000::DBL_SIGMA_SQ;
constexpr auto MIN_DISTANCE = mean_shift::cuda::bench_3d::case_5000::MIN_DISTANCE;
// Device
constexpr auto THREADS = mean_shift::cuda::bench_3d::THREADS;
constexpr auto BLOCKS = mean_shift::cuda::bench_3d::case_5000::BLOCKS;
constexpr auto TILE_WIDTH = mean_shift::cuda::bench_3d::TILE_WIDTH;
// Benchmarking
constexpr auto NUM_TRIALS = mean_shift::cuda::bench_3d::NUM_TRIALS;
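// Tiled mean shift: each block cooperatively stages TILE_WIDTH points into
// shared memory, then every thread accumulates the Gaussian-weighted mean
// of its own point over all tiles; valid_data masks the zero padding of the
// final, partially filled tile.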
__global__ void mean_shift_tiling(const float* data, float* data_next) {
// Shared memory allocation
__shared__ float local_data[TILE_WIDTH * D];
__shared__ float valid_data[TILE_WIDTH];
// A few convenient variables
int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
int row = tid * D;
int local_row = threadIdx.x * D;
float new_position[D] = {0.};
float tot_weight = 0.;
// Load data in shared memory
for (int t = 0; t < BLOCKS; ++t) {
int tid_in_tile = t * TILE_WIDTH + threadIdx.x;
if (tid_in_tile < N) {
int row_in_tile = tid_in_tile * D;
for (int j = 0; j < D; ++j) {
local_data[local_row + j] = data[row_in_tile + j];
}
valid_data[threadIdx.x] = 1;
}
else {
for (int j = 0; j < D; ++j) {
local_data[local_row + j] = 0;
valid_data[threadIdx.x] = 0;
}
}
__syncthreads();
        // Threads with tid >= N own no point: skip the accumulation (their
        // reads of data[row + j] would be out of bounds) but keep them in
        // the loop so every thread still reaches __syncthreads().
        if (tid < N) {
        for (int i = 0; i < TILE_WIDTH; ++i) {
int local_row_tile = i * D;
float valid_radius = RADIUS * valid_data[i];
float sq_dist = 0.;
for (int j = 0; j < D; ++j) {
sq_dist += (data[row + j] - local_data[local_row_tile + j]) * (data[row + j] - local_data[local_row_tile + j]);
}
if (sq_dist <= valid_radius) {
float weight = expf(-sq_dist / DBL_SIGMA_SQ);
for (int j = 0; j < D; ++j) {
new_position[j] += (weight * local_data[local_row_tile + j]);
}
tot_weight += (weight * valid_data[i]);
}
        }
        }
__syncthreads();
}
if (tid < N) {
for (int j = 0; j < D; ++j) {
data_next[row + j] = new_position[j] / tot_weight;
}
}
return;
}
double run_once() {
// Load data
std::array<float, N * D> data = mean_shift::cuda::utils::load_csv<N, D>(PATH_TO_DATA, ',');
std::array<float, N * D> data_next {};
float *dev_data;
float *dev_data_next;
// Allocate GPU memory
size_t data_bytes = N * D * sizeof(float);
hipMalloc(&dev_data, data_bytes);
hipMalloc(&dev_data_next, data_bytes);
// Copy to GPU memory
hipMemcpy(dev_data, data.data(), data_bytes, hipMemcpyHostToDevice);
hipMemcpy(dev_data_next, data_next.data(), data_bytes, hipMemcpyHostToDevice);
// Run mean shift clustering
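    // Note: the timed region below also includes the final device-to-host
    // copy and the host-side centroid reduction, not just the kernel loop.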
auto start = std::chrono::high_resolution_clock::now();
for (size_t i = 0; i < NUM_ITER; ++i) {
hipLaunchKernelGGL(( mean_shift_tiling), dim3(BLOCKS), dim3(THREADS), 0, 0, dev_data, dev_data_next);
hipDeviceSynchronize();
mean_shift::cuda::utils::swap(dev_data, dev_data_next);
}
hipMemcpy(data.data(), dev_data, data_bytes, hipMemcpyDeviceToHost);
const auto centroids = mean_shift::cuda::utils::reduce_to_centroids<N, D>(data, MIN_DISTANCE);
auto end = std::chrono::high_resolution_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
// Check if correct number
assert(centroids.size() == M);
return duration;
}
int main() {
std::array<double, NUM_TRIALS> exec_times;
for (auto i = 0; i < NUM_TRIALS; ++i)
exec_times[i] = run_once();
mean_shift::cuda::utils::write_csv<double, NUM_TRIALS>(exec_times, LOG_SM, ',');
return 0;
}
|
7eba9a06948546ff03688d41c067f7e6eb0385a8.cu
|
#include <cassert>
#include <chrono>
#include "../constants_bench_3d.h"
#include <cuda.h>
#include <iostream>
#include "../../../../utils.h"
// Dataset
constexpr auto N = mean_shift::cuda::bench_3d::case_5000::N;
constexpr auto D = mean_shift::cuda::bench_3d::D;
constexpr auto M = mean_shift::cuda::bench_3d::M;
const auto PATH_TO_DATA = mean_shift::cuda::bench_3d::case_5000::PATH_TO_DATA;
const auto PATH_TO_CENTROIDS = mean_shift::cuda::bench_3d::case_5000::PATH_TO_CENTROIDS;
const auto LOG_SM = mean_shift::cuda::bench_3d::case_5000::LOG_SM;
// Hyperparams
constexpr auto RADIUS = mean_shift::cuda::bench_3d::case_5000::RADIUS;
constexpr auto NUM_ITER = mean_shift::cuda::bench_3d::NUM_ITER;
constexpr auto DBL_SIGMA_SQ = mean_shift::cuda::bench_3d::case_5000::DBL_SIGMA_SQ;
constexpr auto MIN_DISTANCE = mean_shift::cuda::bench_3d::case_5000::MIN_DISTANCE;
// Device
constexpr auto THREADS = mean_shift::cuda::bench_3d::THREADS;
constexpr auto BLOCKS = mean_shift::cuda::bench_3d::case_5000::BLOCKS;
constexpr auto TILE_WIDTH = mean_shift::cuda::bench_3d::TILE_WIDTH;
// Benchmarking
constexpr auto NUM_TRIALS = mean_shift::cuda::bench_3d::NUM_TRIALS;
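// Tiled mean shift: each block cooperatively stages TILE_WIDTH points into
// shared memory, then every thread accumulates the Gaussian-weighted mean
// of its own point over all tiles; valid_data masks the zero padding of the
// final, partially filled tile.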
__global__ void mean_shift_tiling(const float* data, float* data_next) {
// Shared memory allocation
__shared__ float local_data[TILE_WIDTH * D];
__shared__ float valid_data[TILE_WIDTH];
// A few convenient variables
int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
int row = tid * D;
int local_row = threadIdx.x * D;
float new_position[D] = {0.};
float tot_weight = 0.;
// Load data in shared memory
for (int t = 0; t < BLOCKS; ++t) {
int tid_in_tile = t * TILE_WIDTH + threadIdx.x;
if (tid_in_tile < N) {
int row_in_tile = tid_in_tile * D;
for (int j = 0; j < D; ++j) {
local_data[local_row + j] = data[row_in_tile + j];
}
valid_data[threadIdx.x] = 1;
}
else {
for (int j = 0; j < D; ++j) {
local_data[local_row + j] = 0;
valid_data[threadIdx.x] = 0;
}
}
__syncthreads();
        // Threads with tid >= N own no point: skip the accumulation (their
        // reads of data[row + j] would be out of bounds) but keep them in
        // the loop so every thread still reaches __syncthreads().
        if (tid < N) {
        for (int i = 0; i < TILE_WIDTH; ++i) {
int local_row_tile = i * D;
float valid_radius = RADIUS * valid_data[i];
float sq_dist = 0.;
for (int j = 0; j < D; ++j) {
sq_dist += (data[row + j] - local_data[local_row_tile + j]) * (data[row + j] - local_data[local_row_tile + j]);
}
if (sq_dist <= valid_radius) {
float weight = expf(-sq_dist / DBL_SIGMA_SQ);
for (int j = 0; j < D; ++j) {
new_position[j] += (weight * local_data[local_row_tile + j]);
}
tot_weight += (weight * valid_data[i]);
}
        }
        }
__syncthreads();
}
if (tid < N) {
for (int j = 0; j < D; ++j) {
data_next[row + j] = new_position[j] / tot_weight;
}
}
return;
}
double run_once() {
// Load data
std::array<float, N * D> data = mean_shift::cuda::utils::load_csv<N, D>(PATH_TO_DATA, ',');
std::array<float, N * D> data_next {};
float *dev_data;
float *dev_data_next;
// Allocate GPU memory
size_t data_bytes = N * D * sizeof(float);
cudaMalloc(&dev_data, data_bytes);
cudaMalloc(&dev_data_next, data_bytes);
// Copy to GPU memory
cudaMemcpy(dev_data, data.data(), data_bytes, cudaMemcpyHostToDevice);
cudaMemcpy(dev_data_next, data_next.data(), data_bytes, cudaMemcpyHostToDevice);
// Run mean shift clustering
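    // Note: the timed region below also includes the final device-to-host
    // copy and the host-side centroid reduction, not just the kernel loop.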
auto start = std::chrono::high_resolution_clock::now();
for (size_t i = 0; i < NUM_ITER; ++i) {
mean_shift_tiling<<<BLOCKS, THREADS>>>(dev_data, dev_data_next);
cudaDeviceSynchronize();
mean_shift::cuda::utils::swap(dev_data, dev_data_next);
}
cudaMemcpy(data.data(), dev_data, data_bytes, cudaMemcpyDeviceToHost);
const auto centroids = mean_shift::cuda::utils::reduce_to_centroids<N, D>(data, MIN_DISTANCE);
auto end = std::chrono::high_resolution_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
// Check if correct number
assert(centroids.size() == M);
return duration;
}
int main() {
std::array<double, NUM_TRIALS> exec_times;
for (auto i = 0; i < NUM_TRIALS; ++i)
exec_times[i] = run_once();
mean_shift::cuda::utils::write_csv<double, NUM_TRIALS>(exec_times, LOG_SM, ',');
return 0;
}
|